diff --git a/.drone/drone.jsonnet b/.drone/drone.jsonnet index 0a4d2c66ef9d0..96acc3cd2b5bf 100644 --- a/.drone/drone.jsonnet +++ b/.drone/drone.jsonnet @@ -160,14 +160,14 @@ local promtail_win() = pipeline('promtail-windows') { steps: [ { name: 'identify-runner', - image: 'golang:1.19-windowsservercore-1809', + image: 'golang:1.21.3-windowsservercore-1809', commands: [ 'Write-Output $env:DRONE_RUNNER_NAME', ], }, { name: 'test', - image: 'golang:1.19-windowsservercore-1809', + image: 'golang:1.21.3-windowsservercore-1809', commands: [ 'go test .\\clients\\pkg\\promtail\\targets\\windows\\... -v', ], diff --git a/.drone/drone.yml b/.drone/drone.yml index eb6d9b8720e15..d4d91d2424339 100644 --- a/.drone/drone.yml +++ b/.drone/drone.yml @@ -1666,11 +1666,11 @@ platform: steps: - commands: - Write-Output $env:DRONE_RUNNER_NAME - image: golang:1.19-windowsservercore-1809 + image: golang:1.21.3-windowsservercore-1809 name: identify-runner - commands: - go test .\clients\pkg\promtail\targets\windows\... -v - image: golang:1.19-windowsservercore-1809 + image: golang:1.21.3-windowsservercore-1809 name: test trigger: ref: @@ -2106,6 +2106,6 @@ kind: secret name: gpg_private_key --- kind: signature -hmac: caf375427f92f78711f801f56341357b67737330e906346ee908a796c61dd314 +hmac: 8ae9cff1a379503d0b568f727d9c12bcb486a5e8d1fc3271deea32f07939baf1 ... diff --git a/docs/sources/configure/_index.md b/docs/sources/configure/_index.md index 1523f2454e19b..f6fb4aefe9182 100644 --- a/docs/sources/configure/_index.md +++ b/docs/sources/configure/_index.md @@ -335,14 +335,24 @@ grpc_tls_config: # CLI flag: -server.register-instrumentation [register_instrumentation: | default = true] +# If set to true, gRPC statuses will be reported in instrumentation labels with +# their string representations. Otherwise, they will be reported as "error". +# CLI flag: -server.report-grpc-codes-in-instrumentation-label-enabled +[report_grpc_codes_in_instrumentation_label_enabled: | default = false] + # Timeout for graceful shutdowns # CLI flag: -server.graceful-shutdown-timeout [graceful_shutdown_timeout: | default = 30s] -# Read timeout for HTTP server +# Read timeout for entire HTTP request, including headers and body. # CLI flag: -server.http-read-timeout [http_server_read_timeout: | default = 30s] +# Read timeout for HTTP request headers. If set to 0, value of +# -server.http-read-timeout is used. +# CLI flag: -server.http-read-header-timeout +[http_server_read_header_timeout: | default = 0s] + # Write timeout for HTTP server # CLI flag: -server.http-write-timeout [http_server_write_timeout: | default = 30s] @@ -351,6 +361,11 @@ grpc_tls_config: # CLI flag: -server.http-idle-timeout [http_server_idle_timeout: | default = 2m] +# Log closed connections that did not receive any response, most likely because +# client didn't send any request within timeout. +# CLI flag: -server.http-log-closed-connections-without-response-enabled +[http_log_closed_connections_without_response_enabled: | default = false] + # Limit on the size of a gRPC message this server can receive (bytes). 
# CLI flag: -server.grpc-max-recv-msg-size-bytes [grpc_server_max_recv_msg_size: | default = 4194304] diff --git a/go.mod b/go.mod index 7a82419ec5d6a..cf7b393e8b44d 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( github.com/cespare/xxhash v1.1.0 github.com/cespare/xxhash/v2 v2.2.0 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf - github.com/cristalhq/hedgedhttp v0.7.2 + github.com/cristalhq/hedgedhttp v0.9.1 github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc github.com/docker/docker v24.0.7+incompatible github.com/docker/go-plugins-helpers v0.0.0-20211224144127-6eecb7beb651 @@ -45,11 +45,11 @@ require ( github.com/golang/snappy v0.0.4 github.com/google/go-cmp v0.5.9 github.com/google/renameio/v2 v2.0.0 - github.com/google/uuid v1.3.0 + github.com/google/uuid v1.3.1 github.com/gorilla/mux v1.8.0 github.com/gorilla/websocket v1.5.0 github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 - github.com/grafana/dskit v0.0.0-20231017083947-7b512eb54d47 + github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f github.com/grafana/go-gelf/v2 v2.0.1 github.com/grafana/gomemcache v0.0.0-20231023152154-6947259a0586 github.com/grafana/regexp v0.0.0-20221122212121-6b5c0a4cb7fd @@ -104,7 +104,7 @@ require ( golang.org/x/sys v0.13.0 golang.org/x/time v0.3.0 google.golang.org/api v0.132.0 - google.golang.org/grpc v1.58.3 + google.golang.org/grpc v1.59.0 gopkg.in/alecthomas/kingpin.v2 v2.2.6 gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v3 v3.0.1 @@ -135,7 +135,7 @@ require ( go.opentelemetry.io/collector/pdata v1.0.0-rcv0015 go4.org/netipx v0.0.0-20230125063823-8449b0a6169f golang.org/x/exp v0.0.0-20230801115018-d63ba01acd4b - golang.org/x/oauth2 v0.10.0 + golang.org/x/oauth2 v0.11.0 golang.org/x/text v0.13.0 google.golang.org/protobuf v1.31.0 k8s.io/apimachinery v0.28.1 @@ -316,7 +316,7 @@ require ( golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 // indirect gopkg.in/fsnotify/fsnotify.v1 v1.4.7 // indirect gopkg.in/inf.v0 v0.9.1 // indirect diff --git a/go.sum b/go.sum index 4bdb7d2237462..42fd5c822b694 100644 --- a/go.sum +++ b/go.sum @@ -464,8 +464,8 @@ github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7Do github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11 h1:07n33Z8lZxZ2qwegKbObQohDhXDQxiMMz1NOUGYlesw= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/cristalhq/hedgedhttp v0.7.2 h1:RbQacI2n+1fIOslNq/pjgOfBe1RfjAa7hqHpojopCic= -github.com/cristalhq/hedgedhttp v0.7.2/go.mod h1:XkqWU6qVMutbhW68NnzjWrGtH8NUx1UfYqGYtHVKIsI= +github.com/cristalhq/hedgedhttp v0.9.1 h1:g68L9cf8uUyQKQJwciD0A1Vgbsz+QgCjuB1I8FAsCDs= +github.com/cristalhq/hedgedhttp v0.9.1/go.mod h1:XkqWU6qVMutbhW68NnzjWrGtH8NUx1UfYqGYtHVKIsI= github.com/d4l3k/messagediff v1.2.1 h1:ZcAIMYsUg0EAp9X+tt8/enBE/Q8Yd5kzPynLyKptt9U= github.com/d4l3k/messagediff v1.2.1/go.mod h1:Oozbb1TVXFac9FtSIxHBMnBCq2qeH/2KkEQxENCrlLo= github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -939,8 +939,9 @@ github.com/google/uuid 
v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= +github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.1.0/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8= github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= @@ -981,8 +982,8 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 h1:qhugDMdQ4Vp68H0tp/0iN17DM2ehRo1rLEdOFe/gB8I= github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2/go.mod h1:w/aiO1POVIeXUQyl0VQSZjl5OAGDTL5aX+4v0RA1tcw= -github.com/grafana/dskit v0.0.0-20231017083947-7b512eb54d47 h1:wRtcM7fvzg/MJ4KCIYLryadp2fI3pO61BEiY7SizCoI= -github.com/grafana/dskit v0.0.0-20231017083947-7b512eb54d47/go.mod h1:byPCvaG/pqi33Kq+Wvkp7WhLfmrlyy0RAoYG4yRh01I= +github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f h1:gyojr97YeWZ70pKNakWv5/tKwBHuLy3icnIeCo9gQr4= +github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f/go.mod h1:8dsy5tQOkeNQyjXpm5mQsbCu3H5uzeBD35MzRQFznKU= github.com/grafana/go-gelf/v2 v2.0.1 h1:BOChP0h/jLeD+7F9mL7tq10xVkDG15he3T1zHuQaWak= github.com/grafana/go-gelf/v2 v2.0.1/go.mod h1:lexHie0xzYGwCgiRGcvZ723bSNyNI8ZRD4s0CLobh90= github.com/grafana/gocql v0.0.0-20200605141915-ba5dc39ece85 h1:xLuzPoOzdfNb/RF/IENCw+oLVdZB4G21VPhkHBgwSHY= @@ -2024,8 +2025,8 @@ golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7Lm golang.org/x/oauth2 v0.0.0-20220622183110-fd043fe589d2/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE= golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= golang.org/x/oauth2 v0.0.0-20220909003341-f21342109be1/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU= +golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2473,8 +2474,8 @@ google.golang.org/genproto v0.0.0-20220919141832-68c03719ef51/go.mod h1:0Nb8Qy+S google.golang.org/genproto v0.0.0-20220921223823-23cae91e6737/go.mod h1:2r/26NEF3bFmT3eC3aZreahSal0C3Shl8Gi6vyDYqOQ= google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb 
h1:XFBgcDwm7irdHTbz4Zk2h7Mh+eis4nfJEFQFYzJzuIA= google.golang.org/genproto v0.0.0-20230913181813-007df8e322eb/go.mod h1:yZTlhN0tQnXo3h00fuXNCxJdLdIdnVFVBaRJ5LWBbw4= -google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 h1:nIgk/EEq3/YlnmVVXVnm14rC2oxgs1o0ong4sD/rd44= -google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5/go.mod h1:5DZzOUPCLYL3mNkQ0ms0F3EuUNZ7py1Bqeq6sxzI7/Q= +google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d h1:DoPTO70H+bcDXcd39vOqb2viZxgqeBeSGtZ55yZU4/Q= +google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk= google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13 h1:N3bU/SQDCDyD6R528GJ/PwW9KjYcJA3dgyH+MovAkIM= google.golang.org/genproto/googleapis/rpc v0.0.0-20230920204549-e6e6cdab5c13/go.mod h1:KSqppvjFjtoCI+KGd4PELB0qLNxdJHRGqRI09mB6pQA= google.golang.org/grpc v1.12.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= @@ -2519,8 +2520,8 @@ google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACu google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.48.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk= google.golang.org/grpc v1.49.0/go.mod h1:ZgQEeidpAuNRZ8iRrlBKXZQP1ghovWIVhdJRyCDK+GI= -google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ= -google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/pkg/lokifrontend/frontend/transport/handler.go b/pkg/lokifrontend/frontend/transport/handler.go index 06f1ebe1c7b63..2a4781b3bc718 100644 --- a/pkg/lokifrontend/frontend/transport/handler.go +++ b/pkg/lokifrontend/frontend/transport/handler.go @@ -15,7 +15,6 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/grafana/dskit/httpgrpc" - "github.com/grafana/dskit/httpgrpc/server" "github.com/grafana/dskit/user" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" @@ -241,7 +240,7 @@ func writeError(w http.ResponseWriter, err error) { err = errRequestEntityTooLarge } } - server.WriteError(w, err) + httpgrpc.WriteError(w, err) } func writeServiceTimingHeader(queryResponseTime time.Duration, headers http.Header, stats *querier_stats.Stats) { @@ -277,7 +276,7 @@ func (a *grpcRoundTripperToHandlerAdapter) Do(ctx context.Context, req queryrang return nil, err } - grpcReq, err := server.HTTPRequest(httpReq) + grpcReq, err := httpgrpc.FromHTTPRequest(httpReq) if err != nil { return nil, fmt.Errorf("cannot convert HTTP request to gRPC request: %w", err) } diff --git a/pkg/lokifrontend/frontend/v2/frontend.go b/pkg/lokifrontend/frontend/v2/frontend.go index 4fe591a346a9b..695a054e42580 100644 --- a/pkg/lokifrontend/frontend/v2/frontend.go +++ b/pkg/lokifrontend/frontend/v2/frontend.go @@ -14,7 +14,6 @@ import ( "github.com/grafana/dskit/flagext" "github.com/grafana/dskit/grpcclient" 
"github.com/grafana/dskit/httpgrpc" - "github.com/grafana/dskit/httpgrpc/server" "github.com/grafana/dskit/netutil" "github.com/grafana/dskit/ring" "github.com/grafana/dskit/services" @@ -317,7 +316,7 @@ func (f *Frontend) Do(ctx context.Context, req queryrangebase.Request) (queryran return nil, fmt.Errorf("cannot convert request to HTTP request: %w", err) } - freq.request, err = server.HTTPRequest(httpReq) + freq.request, err = httpgrpc.FromHTTPRequest(httpReq) if err != nil { return nil, fmt.Errorf("cannot convert HTTP request to gRPC request: %w", err) } diff --git a/vendor/github.com/cristalhq/hedgedhttp/README.md b/vendor/github.com/cristalhq/hedgedhttp/README.md index aec2a1b3548d5..104213b350b12 100644 --- a/vendor/github.com/cristalhq/hedgedhttp/README.md +++ b/vendor/github.com/cristalhq/hedgedhttp/README.md @@ -10,7 +10,7 @@ Hedged HTTP client which helps to reduce tail latency at scale. ## Rationale -See paper [Tail at Scale](https://cacm.acm.org/magazines/2013/2/160173-the-tail-at-scale/fulltext) by Jeffrey Dean, Luiz André Barroso. In short: the client first sends one request, but then sends an additional request after a timeout if the previous hasn't returned an answer in the expected time. The client cancels remaining requests once the first result is received. +See paper [Tail at Scale](https://www.barroso.org/publications/TheTailAtScale.pdf) by Jeffrey Dean, Luiz André Barroso. In short: the client first sends one request, but then sends an additional request after a timeout if the previous hasn't returned an answer in the expected time. The client cancels remaining requests once the first result is received. ## Acknowledge diff --git a/vendor/github.com/cristalhq/hedgedhttp/hedged.go b/vendor/github.com/cristalhq/hedgedhttp/hedged.go index 56d65b0b1c44e..b7b33f50b89d3 100644 --- a/vendor/github.com/cristalhq/hedgedhttp/hedged.go +++ b/vendor/github.com/cristalhq/hedgedhttp/hedged.go @@ -12,6 +12,79 @@ import ( const infiniteTimeout = 30 * 24 * time.Hour // domain specific infinite +// Client represents a hedged HTTP client. +type Client struct { + rt http.RoundTripper + stats *Stats +} + +// Config for the [Client]. +type Config struct { + // Transport of the [Client]. + // Default is nil which results in [net/http.DefaultTransport]. + Transport http.RoundTripper + + // Upto says how much requests to make. + // Default is zero which means no hedged requests will be made. + Upto int + + // Delay before 2 consequitive hedged requests. + Delay time.Duration + + // Next returns the upto and delay for each HTTP that will be hedged. + // Default is nil which results in (Upto, Delay) result. + Next NextFn +} + +// NextFn represents a function that is called for each HTTP request for retrieving hedging options. +type NextFn func() (upto int, delay time.Duration) + +// New returns a new Client for the given config. +func New(cfg Config) (*Client, error) { + switch { + case cfg.Delay < 0: + return nil, errors.New("hedgedhttp: timeout cannot be negative") + case cfg.Upto < 0: + return nil, errors.New("hedgedhttp: upto cannot be negative") + } + if cfg.Transport == nil { + cfg.Transport = http.DefaultTransport + } + + rt, stats, err := NewRoundTripperAndStats(cfg.Delay, cfg.Upto, cfg.Transport) + if err != nil { + return nil, err + } + + // TODO(cristaloleg): this should be removed after internals cleanup. 
+ rt2, ok := rt.(*hedgedTransport) + if !ok { + panic(fmt.Sprintf("want *hedgedTransport got %T", rt)) + } + rt2.next = cfg.Next + + c := &Client{ + rt: rt2, + stats: stats, + } + return c, nil +} + +// Stats returns statistics for the given client, see [Stats] methods. +func (c *Client) Stats() *Stats { + return c.stats +} + +// Do does the same as [RoundTrip], this method is presented to align with [net/http.Client]. +func (c *Client) Do(req *http.Request) (*http.Response, error) { + return c.rt.RoundTrip(req) +} + +// RoundTrip implements [net/http.RoundTripper] interface. +func (c *Client) RoundTrip(req *http.Request) (*http.Response, error) { + return c.rt.RoundTrip(req) +} + // NewClient returns a new http.Client which implements hedged requests pattern. // Given Client starts a new request after a timeout from previous request. // Starts no more than upto requests. @@ -63,8 +136,8 @@ func NewRoundTripperAndStats(timeout time.Duration, upto int, rt http.RoundTripp switch { case timeout < 0: return nil, nil, errors.New("hedgedhttp: timeout cannot be negative") - case upto < 1: - return nil, nil, errors.New("hedgedhttp: upto must be greater than 0") + case upto < 0: + return nil, nil, errors.New("hedgedhttp: upto cannot be negative") } if rt == nil { @@ -88,21 +161,35 @@ type hedgedTransport struct { rt http.RoundTripper timeout time.Duration upto int + next NextFn metrics *Stats } func (ht *hedgedTransport) RoundTrip(req *http.Request) (*http.Response, error) { mainCtx := req.Context() - timeout := ht.timeout + upto, timeout := ht.upto, ht.timeout + if ht.next != nil { + upto, timeout = ht.next() + } + + // no hedged requests, just a regular one. + if upto <= 0 { + return ht.rt.RoundTrip(req) + } + // rollback to default timeout. + if timeout < 0 { + timeout = ht.timeout + } + errOverall := &MultiError{} - resultCh := make(chan indexedResp, ht.upto) - errorCh := make(chan error, ht.upto) + resultCh := make(chan indexedResp, upto) + errorCh := make(chan error, upto) ht.metrics.requestedRoundTripsInc() resultIdx := -1 - cancels := make([]func(), ht.upto) + cancels := make([]func(), upto) defer runInPool(func() { for i, cancel := range cancels { @@ -113,8 +200,8 @@ func (ht *hedgedTransport) RoundTrip(req *http.Request) (*http.Response, error) } }) - for sent := 0; len(errOverall.Errors) < ht.upto; sent++ { - if sent < ht.upto { + for sent := 0; len(errOverall.Errors) < upto; sent++ { + if sent < upto { idx := sent subReq, cancel := reqWithCtx(req, mainCtx, idx != 0) cancels[idx] = cancel @@ -132,7 +219,7 @@ func (ht *hedgedTransport) RoundTrip(req *http.Request) (*http.Response, error) } // all request sent - effectively disabling timeout between requests - if sent == ht.upto { + if sent == upto { timeout = infiniteTimeout } resp, err := waitResult(mainCtx, resultCh, errorCh, timeout) @@ -140,6 +227,11 @@ func (ht *hedgedTransport) RoundTrip(req *http.Request) (*http.Response, error) switch { case resp.Resp != nil: resultIdx = resp.Index + if resultIdx == 0 { + ht.metrics.originalRequestWinsInc() + } else { + ht.metrics.hedgedRequestWinsInc() + } return resp.Resp, nil case mainCtx.Err() != nil: ht.metrics.canceledByUserRoundTripsInc() diff --git a/vendor/github.com/cristalhq/hedgedhttp/stats.go b/vendor/github.com/cristalhq/hedgedhttp/stats.go index fceeb234a22e5..f29331890826a 100644 --- a/vendor/github.com/cristalhq/hedgedhttp/stats.go +++ b/vendor/github.com/cristalhq/hedgedhttp/stats.go @@ -16,6 +16,8 @@ type Stats struct { requestedRoundTrips atomicCounter actualRoundTrips 
atomicCounter failedRoundTrips atomicCounter + originalRequestWins atomicCounter + hedgedRequestWins atomicCounter canceledByUserRoundTrips atomicCounter canceledSubRequests atomicCounter _ cacheLine @@ -24,6 +26,8 @@ type Stats struct { func (s *Stats) requestedRoundTripsInc() { atomic.AddUint64(&s.requestedRoundTrips.count, 1) } func (s *Stats) actualRoundTripsInc() { atomic.AddUint64(&s.actualRoundTrips.count, 1) } func (s *Stats) failedRoundTripsInc() { atomic.AddUint64(&s.failedRoundTrips.count, 1) } +func (s *Stats) originalRequestWinsInc() { atomic.AddUint64(&s.originalRequestWins.count, 1) } +func (s *Stats) hedgedRequestWinsInc() { atomic.AddUint64(&s.hedgedRequestWins.count, 1) } func (s *Stats) canceledByUserRoundTripsInc() { atomic.AddUint64(&s.canceledByUserRoundTrips.count, 1) } func (s *Stats) canceledSubRequestsInc() { atomic.AddUint64(&s.canceledSubRequests.count, 1) } @@ -42,6 +46,16 @@ func (s *Stats) FailedRoundTrips() uint64 { return atomic.LoadUint64(&s.failedRoundTrips.count) } +// OriginalRequestWins returns count of original requests that were faster than the original. +func (s *Stats) OriginalRequestWins() uint64 { + return atomic.LoadUint64(&s.originalRequestWins.count) +} + +// HedgedRequestWins returns count of hedged requests that were faster than the original. +func (s *Stats) HedgedRequestWins() uint64 { + return atomic.LoadUint64(&s.hedgedRequestWins.count) +} + // CanceledByUserRoundTrips returns count of requests that were canceled by user, using request context. func (s *Stats) CanceledByUserRoundTrips() uint64 { return atomic.LoadUint64(&s.canceledByUserRoundTrips.count) diff --git a/vendor/github.com/google/uuid/.travis.yml b/vendor/github.com/google/uuid/.travis.yml deleted file mode 100644 index d8156a60ba9b3..0000000000000 --- a/vendor/github.com/google/uuid/.travis.yml +++ /dev/null @@ -1,9 +0,0 @@ -language: go - -go: - - 1.4.3 - - 1.5.3 - - tip - -script: - - go test -v ./... diff --git a/vendor/github.com/google/uuid/CHANGELOG.md b/vendor/github.com/google/uuid/CHANGELOG.md new file mode 100644 index 0000000000000..2bd78667afbb3 --- /dev/null +++ b/vendor/github.com/google/uuid/CHANGELOG.md @@ -0,0 +1,10 @@ +# Changelog + +## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) + + +### Bug Fixes + +* Use .EqualFold() to parse urn prefixed UUIDs ([#118](https://github.com/google/uuid/issues/118)) ([574e687](https://github.com/google/uuid/commit/574e6874943741fb99d41764c705173ada5293f0)) + +## Changelog diff --git a/vendor/github.com/google/uuid/CONTRIBUTING.md b/vendor/github.com/google/uuid/CONTRIBUTING.md index 04fdf09f136bb..5566888726d98 100644 --- a/vendor/github.com/google/uuid/CONTRIBUTING.md +++ b/vendor/github.com/google/uuid/CONTRIBUTING.md @@ -2,6 +2,22 @@ We definitely welcome patches and contribution to this project! +### Tips + +Commits must be formatted according to the [Conventional Commits Specification](https://www.conventionalcommits.org). + +Always try to include a test case! If it is not possible or not necessary, +please explain why in the pull request description. + +### Releasing + +Commits that would precipitate a SemVer change, as desrcibed in the Conventional +Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) +to create a release candidate pull request. Once submitted, `release-please` +will create a release. + +For tips on how to work with `release-please`, see its documentation. 
+ ### Legal requirements In order to protect both you and ourselves, you will need to sign the diff --git a/vendor/github.com/google/uuid/README.md b/vendor/github.com/google/uuid/README.md index f765a46f91501..3e9a61889de48 100644 --- a/vendor/github.com/google/uuid/README.md +++ b/vendor/github.com/google/uuid/README.md @@ -1,6 +1,6 @@ -# uuid ![build status](https://travis-ci.org/google/uuid.svg?branch=master) +# uuid The uuid package generates and inspects UUIDs based on -[RFC 4122](http://tools.ietf.org/html/rfc4122) +[RFC 4122](https://datatracker.ietf.org/doc/html/rfc4122) and DCE 1.1: Authentication and Security Services. This package is based on the github.com/pborman/uuid package (previously named @@ -9,10 +9,12 @@ a UUID is a 16 byte array rather than a byte slice. One loss due to this change is the ability to represent an invalid UUID (vs a NIL UUID). ###### Install -`go get github.com/google/uuid` +```sh +go get github.com/google/uuid +``` ###### Documentation -[![GoDoc](https://godoc.org/github.com/google/uuid?status.svg)](http://godoc.org/github.com/google/uuid) +[![Go Reference](https://pkg.go.dev/badge/github.com/google/uuid.svg)](https://pkg.go.dev/github.com/google/uuid) Full `go doc` style documentation for the package can be viewed online without installing this package by using the GoDoc site here: diff --git a/vendor/github.com/google/uuid/node_js.go b/vendor/github.com/google/uuid/node_js.go index 24b78edc90710..b2a0bc8711b3d 100644 --- a/vendor/github.com/google/uuid/node_js.go +++ b/vendor/github.com/google/uuid/node_js.go @@ -7,6 +7,6 @@ package uuid // getHardwareInterface returns nil values for the JS version of the code. -// This remvoves the "net" dependency, because it is not used in the browser. +// This removes the "net" dependency, because it is not used in the browser. // Using the "net" library inflates the size of the transpiled JS code by 673k bytes. 
func getHardwareInterface(name string) (string, []byte) { return "", nil } diff --git a/vendor/github.com/google/uuid/uuid.go b/vendor/github.com/google/uuid/uuid.go index a57207aeb6fd8..a56138cc4bd04 100644 --- a/vendor/github.com/google/uuid/uuid.go +++ b/vendor/github.com/google/uuid/uuid.go @@ -69,7 +69,7 @@ func Parse(s string) (UUID, error) { // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: - if strings.ToLower(s[:9]) != "urn:uuid:" { + if !strings.EqualFold(s[:9], "urn:uuid:") { return uuid, fmt.Errorf("invalid urn prefix: %q", s[:9]) } s = s[9:] @@ -101,7 +101,8 @@ func Parse(s string) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(s[x], s[x+1]) if !ok { return uuid, errors.New("invalid UUID format") @@ -117,7 +118,7 @@ func ParseBytes(b []byte) (UUID, error) { switch len(b) { case 36: // xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx case 36 + 9: // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - if !bytes.Equal(bytes.ToLower(b[:9]), []byte("urn:uuid:")) { + if !bytes.EqualFold(b[:9], []byte("urn:uuid:")) { return uuid, fmt.Errorf("invalid urn prefix: %q", b[:9]) } b = b[9:] @@ -145,7 +146,8 @@ func ParseBytes(b []byte) (UUID, error) { 9, 11, 14, 16, 19, 21, - 24, 26, 28, 30, 32, 34} { + 24, 26, 28, 30, 32, 34, + } { v, ok := xtob(b[x], b[x+1]) if !ok { return uuid, errors.New("invalid UUID format") diff --git a/vendor/github.com/grafana/dskit/concurrency/buffer.go b/vendor/github.com/grafana/dskit/concurrency/buffer.go index 623b9a707612f..b8da4423f10e8 100644 --- a/vendor/github.com/grafana/dskit/concurrency/buffer.go +++ b/vendor/github.com/grafana/dskit/concurrency/buffer.go @@ -24,3 +24,10 @@ func (sb *SyncBuffer) String() string { return sb.buf.String() } + +func (sb *SyncBuffer) Reset() { + sb.mu.Lock() + defer sb.mu.Unlock() + + sb.buf.Reset() +} diff --git a/vendor/github.com/grafana/dskit/concurrency/worker.go b/vendor/github.com/grafana/dskit/concurrency/worker.go new file mode 100644 index 0000000000000..f40f0334800b7 --- /dev/null +++ b/vendor/github.com/grafana/dskit/concurrency/worker.go @@ -0,0 +1,38 @@ +package concurrency + +// NewReusableGoroutinesPool creates a new worker pool with the given size. +// These workers will run the workloads passed through Go() calls. +// If all workers are busy, Go() will spawn a new goroutine to run the workload. +func NewReusableGoroutinesPool(size int) *ReusableGoroutinesPool { + p := &ReusableGoroutinesPool{ + jobs: make(chan func()), + } + for i := 0; i < size; i++ { + go func() { + for f := range p.jobs { + f() + } + }() + } + return p +} + +type ReusableGoroutinesPool struct { + jobs chan func() +} + +// Go will run the given function in a worker of the pool. +// If all workers are busy, Go() will spawn a new goroutine to run the workload. +func (p *ReusableGoroutinesPool) Go(f func()) { + select { + case p.jobs <- f: + default: + go f() + } +} + +// Close stops the workers of the pool. +// No new Do() calls should be performed after calling Close(). +// Close does NOT wait for all jobs to finish, it is the caller's responsibility to ensure that in the provided workloads. +// Close is intended to be used in tests to ensure that no goroutines are leaked. 
+func (p *ReusableGoroutinesPool) Close() { close(p.jobs) } diff --git a/vendor/github.com/grafana/dskit/grpcclient/instrumentation.go b/vendor/github.com/grafana/dskit/grpcclient/instrumentation.go index 4a10ce48d27a8..280f02180c3e9 100644 --- a/vendor/github.com/grafana/dskit/grpcclient/instrumentation.go +++ b/vendor/github.com/grafana/dskit/grpcclient/instrumentation.go @@ -9,14 +9,14 @@ import ( "github.com/grafana/dskit/middleware" ) -func Instrument(requestDuration *prometheus.HistogramVec) ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) { +func Instrument(requestDuration *prometheus.HistogramVec, instrumentationLabelOptions ...middleware.InstrumentationOption) ([]grpc.UnaryClientInterceptor, []grpc.StreamClientInterceptor) { return []grpc.UnaryClientInterceptor{ otgrpc.OpenTracingClientInterceptor(opentracing.GlobalTracer()), middleware.ClientUserHeaderInterceptor, - middleware.UnaryClientInstrumentInterceptor(requestDuration), + middleware.UnaryClientInstrumentInterceptor(requestDuration, instrumentationLabelOptions...), }, []grpc.StreamClientInterceptor{ otgrpc.OpenTracingStreamClientInterceptor(opentracing.GlobalTracer()), middleware.StreamClientUserHeaderInterceptor, - middleware.StreamClientInstrumentInterceptor(requestDuration), + middleware.StreamClientInstrumentInterceptor(requestDuration, instrumentationLabelOptions...), } } diff --git a/vendor/github.com/grafana/dskit/grpcutil/cancel.go b/vendor/github.com/grafana/dskit/grpcutil/cancel.go deleted file mode 100644 index b1d369d2a3ea8..0000000000000 --- a/vendor/github.com/grafana/dskit/grpcutil/cancel.go +++ /dev/null @@ -1,25 +0,0 @@ -// Provenance-includes-location: https://github.com/weaveworks/common/blob/main/grpc/cancel.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: Weaveworks Ltd. - -package grpcutil - -import ( - "context" - "errors" - - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// IsCanceled checks whether an error comes from an operation being canceled -func IsCanceled(err error) bool { - if errors.Is(err, context.Canceled) { - return true - } - s, ok := status.FromError(err) - if ok && s.Code() == codes.Canceled { - return true - } - return false -} diff --git a/vendor/github.com/grafana/dskit/grpcutil/status.go b/vendor/github.com/grafana/dskit/grpcutil/status.go new file mode 100644 index 0000000000000..a9e9aab249a34 --- /dev/null +++ b/vendor/github.com/grafana/dskit/grpcutil/status.go @@ -0,0 +1,70 @@ +package grpcutil + +import ( + "context" + "errors" + + "github.com/gogo/status" + "google.golang.org/grpc/codes" + grpcstatus "google.golang.org/grpc/status" +) + +// ErrorToStatus returns a *github.com/gogo/status.Status representation of err. +// +// - If err implements the method `GRPCStatus() *google.golang.org/grpc/status.Status` and +// `GRPCStatus()` does not return nil, or if err wraps a type satisfying this, Status from +// `GRPCStatus()` is converted to gogo Status, and returned. In that case, ok is true. +// +// - If err is or GRPCStatus() returns nil, a nil Status is returned and ok is false. +// +// - Otherwise, err is an error not compatible with this function. In this +// case, a nil Status is returned and ok is false. 
+func ErrorToStatus(err error) (*status.Status, bool) { + if err == nil { + return nil, false + } + type grpcStatus interface{ GRPCStatus() *grpcstatus.Status } + var gs grpcStatus + if errors.As(err, &gs) { + st := gs.GRPCStatus() + if st == nil { + return nil, false + } + return status.FromGRPCStatus(st), true + } + return nil, false +} + +// ErrorToStatusCode extracts gRPC status code from error and returns it. +// +// - If err is nil, codes.OK is returned. +// +// - If err implements (or wraps error that implements) the method +// `GRPCStatus() *google.golang.org/grpc/status.Status`, and +// `GRPCStatus()` returns a non-nil status, code from the status +// is returned. +// +// - Otherwise code.Unknown is returned. +func ErrorToStatusCode(err error) codes.Code { + if err == nil { + return codes.OK + } + type grpcStatus interface{ GRPCStatus() *grpcstatus.Status } + var gs grpcStatus + if errors.As(err, &gs) { + st := gs.GRPCStatus() + if st != nil { + return st.Code() + } + } + return codes.Unknown +} + +// IsCanceled checks whether an error comes from an operation being canceled. +func IsCanceled(err error) bool { + if errors.Is(err, context.Canceled) { + return true + } + statusCode := ErrorToStatusCode(err) + return statusCode == codes.Canceled +} diff --git a/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go b/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go index 3012edd422ba6..e1f044d8650bb 100644 --- a/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go +++ b/vendor/github.com/grafana/dskit/httpgrpc/httpgrpc.go @@ -5,19 +5,105 @@ package httpgrpc import ( + "bytes" "context" "fmt" + "io" + "net/http" "github.com/go-kit/log/level" - "google.golang.org/grpc/metadata" - spb "github.com/gogo/googleapis/google/rpc" "github.com/gogo/protobuf/types" "github.com/gogo/status" + "google.golang.org/grpc/metadata" + "github.com/grafana/dskit/grpcutil" "github.com/grafana/dskit/log" ) +const ( + MetadataMethod = "httpgrpc-method" + MetadataURL = "httpgrpc-url" +) + +// AppendRequestMetadataToContext appends metadata of HTTPRequest into gRPC metadata. +func AppendRequestMetadataToContext(ctx context.Context, req *HTTPRequest) context.Context { + return metadata.AppendToOutgoingContext(ctx, + MetadataMethod, req.Method, + MetadataURL, req.Url) +} + +type nopCloser struct { + *bytes.Buffer +} + +func (nopCloser) Close() error { return nil } + +// BytesBuffer returns the underlaying `bytes.buffer` used to build this io.ReadCloser. +func (n nopCloser) BytesBuffer() *bytes.Buffer { return n.Buffer } + +// FromHTTPRequest converts an ordinary http.Request into an httpgrpc.HTTPRequest +func FromHTTPRequest(r *http.Request) (*HTTPRequest, error) { + body, err := io.ReadAll(r.Body) + if err != nil { + return nil, err + } + return &HTTPRequest{ + Method: r.Method, + Url: r.RequestURI, + Body: body, + Headers: FromHeader(r.Header), + }, nil +} + +// ToHTTPRequest converts httpgrpc.HTTPRequest to http.Request. 
+func ToHTTPRequest(ctx context.Context, r *HTTPRequest) (*http.Request, error) { + req, err := http.NewRequest(r.Method, r.Url, nopCloser{Buffer: bytes.NewBuffer(r.Body)}) + if err != nil { + return nil, err + } + ToHeader(r.Headers, req.Header) + req = req.WithContext(ctx) + req.RequestURI = r.Url + req.ContentLength = int64(len(r.Body)) + return req, nil +} + +// WriteResponse converts an httpgrpc response to an HTTP one +func WriteResponse(w http.ResponseWriter, resp *HTTPResponse) error { + ToHeader(resp.Headers, w.Header()) + w.WriteHeader(int(resp.Code)) + _, err := w.Write(resp.Body) + return err +} + +// WriteError converts an httpgrpc error to an HTTP one +func WriteError(w http.ResponseWriter, err error) { + resp, ok := HTTPResponseFromError(err) + if ok { + _ = WriteResponse(w, resp) + } else { + http.Error(w, err.Error(), http.StatusInternalServerError) + } +} + +func ToHeader(hs []*Header, header http.Header) { + for _, h := range hs { + header[h.Key] = h.Values + } +} + +func FromHeader(hs http.Header) []*Header { + result := make([]*Header, 0, len(hs)) + for k, vs := range hs { + result = append(result, &Header{ + Key: k, + Values: vs, + }) + } + return result +} + // Errorf returns a HTTP gRPC error than is correctly forwarded over // gRPC, and can eventually be converted back to a HTTP response with // HTTPResponseFromError. @@ -44,7 +130,7 @@ func ErrorFromHTTPResponse(resp *HTTPResponse) error { // HTTPResponseFromError converts a grpc error into an HTTP response func HTTPResponseFromError(err error) (*HTTPResponse, bool) { - s, ok := status.FromError(err) + s, ok := grpcutil.ErrorToStatus(err) if !ok { return nil, false } @@ -62,15 +148,3 @@ func HTTPResponseFromError(err error) (*HTTPResponse, bool) { return &resp, true } - -const ( - MetadataMethod = "httpgrpc-method" - MetadataURL = "httpgrpc-url" -) - -// AppendRequestMetadataToContext appends metadata of HTTPRequest into gRPC metadata. -func AppendRequestMetadataToContext(ctx context.Context, req *HTTPRequest) context.Context { - return metadata.AppendToOutgoingContext(ctx, - MetadataMethod, req.Method, - MetadataURL, req.Url) -} diff --git a/vendor/github.com/grafana/dskit/httpgrpc/server/server.go b/vendor/github.com/grafana/dskit/httpgrpc/server/server.go index b0d808b7b75a1..c642f7fa13fda 100644 --- a/vendor/github.com/grafana/dskit/httpgrpc/server/server.go +++ b/vendor/github.com/grafana/dskit/httpgrpc/server/server.go @@ -5,10 +5,8 @@ package server import ( - "bytes" "context" "fmt" - "io" "net" "net/http" "net/http/httptest" @@ -27,6 +25,13 @@ import ( "github.com/grafana/dskit/middleware" ) +var ( + // DoNotLogErrorHeaderKey is a header key used for marking non-loggable errors. More precisely, if an HTTP response + // has a status code 5xx, and contains a header with key DoNotLogErrorHeaderKey and any values, the generated error + // will be marked as non-loggable. + DoNotLogErrorHeaderKey = http.CanonicalHeaderKey("X-DoNotLogError") +) + // Server implements HTTPServer. HTTPServer is a generated interface that gRPC // servers must implement. type Server struct { @@ -40,35 +45,34 @@ func NewServer(handler http.Handler) *Server { } } -type nopCloser struct { - *bytes.Buffer -} - -func (nopCloser) Close() error { return nil } - -// BytesBuffer returns the underlaying `bytes.buffer` used to build this io.ReadCloser. -func (n nopCloser) BytesBuffer() *bytes.Buffer { return n.Buffer } - // Handle implements HTTPServer. 
func (s Server) Handle(ctx context.Context, r *httpgrpc.HTTPRequest) (*httpgrpc.HTTPResponse, error) { - req, err := http.NewRequest(r.Method, r.Url, nopCloser{Buffer: bytes.NewBuffer(r.Body)}) + req, err := httpgrpc.ToHTTPRequest(ctx, r) if err != nil { return nil, err } - toHeader(r.Headers, req.Header) - req = req.WithContext(ctx) - req.RequestURI = r.Url - req.ContentLength = int64(len(r.Body)) recorder := httptest.NewRecorder() s.handler.ServeHTTP(recorder, req) + header := recorder.Header() + + doNotLogError := false + if _, ok := header[DoNotLogErrorHeaderKey]; ok { + doNotLogError = true + header.Del(DoNotLogErrorHeaderKey) // remove before converting to httpgrpc resp + } + resp := &httpgrpc.HTTPResponse{ Code: int32(recorder.Code), - Headers: fromHeader(recorder.Header()), + Headers: httpgrpc.FromHeader(header), Body: recorder.Body.Bytes(), } if recorder.Code/100 == 5 { - return nil, httpgrpc.ErrorFromHTTPResponse(resp) + err := httpgrpc.ErrorFromHTTPResponse(resp) + if doNotLogError { + err = middleware.DoNotLogError{Err: err} + } + return nil, err } return resp, nil } @@ -153,38 +157,6 @@ func NewClient(address string) (*Client, error) { }, nil } -// HTTPRequest wraps an ordinary HTTPRequest with a gRPC one -func HTTPRequest(r *http.Request) (*httpgrpc.HTTPRequest, error) { - body, err := io.ReadAll(r.Body) - if err != nil { - return nil, err - } - return &httpgrpc.HTTPRequest{ - Method: r.Method, - Url: r.RequestURI, - Body: body, - Headers: fromHeader(r.Header), - }, nil -} - -// WriteResponse converts an httpgrpc response to an HTTP one -func WriteResponse(w http.ResponseWriter, resp *httpgrpc.HTTPResponse) error { - toHeader(resp.Headers, w.Header()) - w.WriteHeader(int(resp.Code)) - _, err := w.Write(resp.Body) - return err -} - -// WriteError converts an httpgrpc error to an HTTP one -func WriteError(w http.ResponseWriter, err error) { - resp, ok := httpgrpc.HTTPResponseFromError(err) - if ok { - _ = WriteResponse(w, resp) - } else { - http.Error(w, err.Error(), http.StatusInternalServerError) - } -} - // ServeHTTP implements http.Handler func (c *Client) ServeHTTP(w http.ResponseWriter, r *http.Request) { if tracer := opentracing.GlobalTracer(); tracer != nil { @@ -195,7 +167,7 @@ func (c *Client) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } - req, err := HTTPRequest(r) + req, err := httpgrpc.FromHTTPRequest(r) if err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return @@ -212,25 +184,8 @@ func (c *Client) ServeHTTP(w http.ResponseWriter, r *http.Request) { } } - if err := WriteResponse(w, resp); err != nil { + if err := httpgrpc.WriteResponse(w, resp); err != nil { http.Error(w, err.Error(), http.StatusInternalServerError) return } } - -func toHeader(hs []*httpgrpc.Header, header http.Header) { - for _, h := range hs { - header[h.Key] = h.Values - } -} - -func fromHeader(hs http.Header) []*httpgrpc.Header { - result := make([]*httpgrpc.Header, 0, len(hs)) - for k, vs := range hs { - result = append(result, &httpgrpc.Header{ - Key: k, - Values: vs, - }) - } - return result -} diff --git a/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go b/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go index 30a27531fd08d..693964b5ad067 100644 --- a/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go +++ b/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go @@ -222,7 +222,7 @@ func generateRandomSuffix(logger log.Logger) string { // If joining of the cluster if configured, it is done in 
Running state, and if join fails and Abort flag is set, service // fails. type KV struct { - services.Service + services.NamedService cfg KVConfig logger log.Logger @@ -374,7 +374,8 @@ func NewKV(cfg KVConfig, logger log.Logger, dnsProvider DNSProvider, registerer mlkv.codecs[c.CodecID()] = c } - mlkv.Service = services.NewBasicService(mlkv.starting, mlkv.running, mlkv.stopping) + mlkv.NamedService = services.NewBasicService(mlkv.starting, mlkv.running, mlkv.stopping).WithName("memberlist_kv") + return mlkv } @@ -485,17 +486,17 @@ func (m *KV) running(ctx context.Context) error { tickerChan = t.C } + logger := log.With(m.logger, "phase", "periodic_rejoin") for { select { case <-tickerChan: - members := m.discoverMembers(ctx, m.cfg.JoinMembers) - - reached, err := m.memberlist.Join(members) + const numAttempts = 1 // don't retry if resolution fails, we will try again next time + reached, err := m.joinMembersWithRetries(ctx, numAttempts, logger) if err == nil { - level.Info(m.logger).Log("msg", "re-joined memberlist cluster", "reached_nodes", reached) + level.Info(logger).Log("msg", "re-joined memberlist cluster", "reached_nodes", reached) } else { // Don't report error from rejoin, otherwise KV service would be stopped completely. - level.Warn(m.logger).Log("msg", "re-joining memberlist cluster failed", "err", err) + level.Warn(logger).Log("msg", "re-joining memberlist cluster failed", "err", err, "next_try_in", m.cfg.RejoinInterval) } case <-ctx.Done(): @@ -540,7 +541,7 @@ func (m *KV) fastJoinMembersOnStartup(ctx context.Context) { level.Info(m.logger).Log("msg", "memberlist fast-join starting", "nodes_found", len(nodes), "to_join", toJoin) totalJoined := 0 - for toJoin > 0 && len(nodes) > 0 { + for toJoin > 0 && len(nodes) > 0 && ctx.Err() == nil { reached, err := m.memberlist.Join(nodes[0:1]) // Try to join single node only. if err != nil { level.Debug(m.logger).Log("msg", "fast-joining node failed", "node", nodes[0], "err", err) @@ -568,41 +569,122 @@ func (m *KV) joinMembersOnStartup(ctx context.Context) bool { return true } + logger := log.With(m.logger, "phase", "startup") + level.Info(logger).Log("msg", "joining memberlist cluster", "join_members", strings.Join(m.cfg.JoinMembers, ",")) startTime := time.Now() + reached, err := m.joinMembersWithRetries(ctx, m.cfg.MaxJoinRetries, logger) + if err != nil { + level.Error(logger).Log("msg", "joining memberlist cluster failed", "err", err, "elapsed_time", time.Since(startTime)) + return false + } + level.Info(logger).Log("msg", "joining memberlist cluster succeeded", "reached_nodes", reached, "elapsed_time", time.Since(startTime)) + return true +} - level.Info(m.logger).Log("msg", "joining memberlist cluster", "join_members", strings.Join(m.cfg.JoinMembers, ",")) - - cfg := backoff.Config{ - MinBackoff: m.cfg.MinJoinBackoff, - MaxBackoff: m.cfg.MaxJoinBackoff, - MaxRetries: m.cfg.MaxJoinRetries, +// joinMembersWithRetries joins m.cfg.JoinMembers 100 at a time. After each batch of 100 it rediscoveres the members. +// This helps when the list of members is big and by the time we reach the end the originally resolved addresses may be obsolete. +// joinMembersWithRetries returns an error iff it couldn't successfully join any node OR the context was cancelled. 
+func (m *KV) joinMembersWithRetries(ctx context.Context, numAttempts int, logger log.Logger) (int, error) { + var ( + cfg = backoff.Config{ + MinBackoff: m.cfg.MinJoinBackoff, + MaxBackoff: m.cfg.MaxJoinBackoff, + MaxRetries: numAttempts, + } + boff = backoff.New(ctx, cfg) + err error + successfullyJoined = 0 + ) + + for ; boff.Ongoing(); boff.Wait() { + successfullyJoined, err = m.joinMembersInBatches(ctx) + if successfullyJoined > 0 { + // If there are _some_ successful joins, then we can consider the join done. + // Mimicking the Join semantics we return an error only when we couldn't join any node at all + err = nil + break + } + level.Warn(logger).Log("msg", "joining memberlist cluster", "attempts", boff.NumRetries()+1, "max_attempts", numAttempts, "err", err) + } + if err == nil && boff.Err() != nil { + err = fmt.Errorf("joining memberlist: %w", boff.Err()) } - boff := backoff.New(ctx, cfg) - var lastErr error + return successfullyJoined, err +} - for boff.Ongoing() { - // We rejoin all nodes, including those that were joined during "fast-join". - // This is harmless and simpler. - nodes := m.discoverMembers(ctx, m.cfg.JoinMembers) +// joinMembersInBatches joins m.cfg.JoinMembers and re-resolves the address of m.cfg.JoinMembers after joining 100 nodes. +// joinMembersInBatches returns the number of nodes joined. joinMembersInBatches returns an error only when the +// number of joined nodes is 0. +func (m *KV) joinMembersInBatches(ctx context.Context) (int, error) { + const batchSize = 100 + var ( + attemptedNodes = make(map[string]bool) + successfullyJoined = 0 + lastErr error + batch = make([]string, batchSize) + nodes []string + ) + for moreAvailableNodes := true; ctx.Err() == nil && moreAvailableNodes; { + // Rediscover nodes and try to join a subset of them with each batch. + // When the list of nodes is large by the time we reach the end of the list some of the + // IPs can be unreachable. + newlyResolved := m.discoverMembers(ctx, m.cfg.JoinMembers) + if len(newlyResolved) > 0 { + // If the resolution fails we keep using the nodes list from the last resolution. + // If that failed too, then we fail the join attempt. + nodes = newlyResolved + } - if len(nodes) > 0 { - reached, err := m.memberlist.Join(nodes) // err is only returned if reached==0. 
- if err == nil { - level.Info(m.logger).Log("msg", "joining memberlist cluster succeeded", "reached_nodes", reached, "elapsed_time", time.Since(startTime)) - return true + // Prepare batch + batch = batch[:0] + moreAvailableNodes = false + for _, n := range nodes { + if attemptedNodes[n] { + continue } - level.Warn(m.logger).Log("msg", "joining memberlist cluster: failed to reach any nodes", "retries", boff.NumRetries(), "err", err) - lastErr = err - } else { - level.Warn(m.logger).Log("msg", "joining memberlist cluster: found no nodes to join", "retries", boff.NumRetries()) + if len(batch) >= batchSize { + moreAvailableNodes = true + break + } + batch = append(batch, n) + attemptedNodes[n] = true } - boff.Wait() + // Join batch + joinedInBatch, err := m.joinMembersBatch(ctx, batch) + if err != nil { + lastErr = err + } + successfullyJoined += joinedInBatch + } + if successfullyJoined > 0 { + return successfullyJoined, nil + } + if successfullyJoined == 0 && lastErr == nil { + return 0, errors.New("found no nodes to join") } + return 0, lastErr +} - level.Error(m.logger).Log("msg", "joining memberlist cluster failed", "last_error", lastErr, "elapsed_time", time.Since(startTime)) - return false +// joinMembersBatch returns an error only if it couldn't successfully join any nodes or if ctx is cancelled. +func (m *KV) joinMembersBatch(ctx context.Context, nodes []string) (successfullyJoined int, lastErr error) { + for nodeIdx := range nodes { + if ctx.Err() != nil { + return successfullyJoined, fmt.Errorf("joining batch: %w", context.Cause(ctx)) + } + // Attempt to join a single node. + // The cost of calling Join shouldn't be different between passing all nodes in one invocation versus passing a single node per invocation. + reached, err := m.memberlist.Join(nodes[nodeIdx : nodeIdx+1]) + successfullyJoined += reached + if err != nil { + lastErr = err + } + } + if successfullyJoined > 0 { + lastErr = nil + } + return successfullyJoined, lastErr } // Provides a dns-based member disovery to join a memberlist cluster w/o knowning members' addresses upfront. 
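The grpc_instrumentation.go changes that follow build on the `grpcutil` status helpers vendored earlier in this diff (`ErrorToStatus`, `ErrorToStatusCode`, and the rewritten `IsCanceled`). As a minimal usage sketch — not part of this diff; the sample error values and comments are illustrative only, assuming the dskit revision vendored here:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"github.com/grafana/dskit/grpcutil"
	"github.com/grafana/dskit/httpgrpc"
	"google.golang.org/grpc/codes"
)

func main() {
	// httpgrpc errors embed an HTTP response; ErrorToStatus surfaces it as a gogo
	// status, which is how HTTPResponseFromError recovers the response in this revision.
	err := httpgrpc.Errorf(429, "too many outstanding requests")
	if st, ok := grpcutil.ErrorToStatus(err); ok {
		fmt.Println(st.Code(), st.Message()) // the HTTP status (429) is carried as the gRPC code
	}

	// ErrorToStatusCode never fails: nil maps to codes.OK, errors without a gRPC status to codes.Unknown.
	fmt.Println(grpcutil.ErrorToStatusCode(nil) == codes.OK)                     // true
	fmt.Println(grpcutil.ErrorToStatusCode(errors.New("boom")) == codes.Unknown) // true

	// IsCanceled is now defined in terms of ErrorToStatusCode.
	fmt.Println(grpcutil.IsCanceled(context.Canceled)) // true
}
```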
diff --git a/vendor/github.com/grafana/dskit/middleware/grpc_instrumentation.go b/vendor/github.com/grafana/dskit/middleware/grpc_instrumentation.go index 70069fa36fadd..e4052b8ed05ff 100644 --- a/vendor/github.com/grafana/dskit/middleware/grpc_instrumentation.go +++ b/vendor/github.com/grafana/dskit/middleware/grpc_instrumentation.go @@ -6,6 +6,7 @@ package middleware import ( "context" + "errors" "io" "strconv" "time" @@ -13,72 +14,69 @@ import ( "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" "google.golang.org/grpc" + "google.golang.org/grpc/codes" "google.golang.org/grpc/metadata" "github.com/grafana/dskit/grpcutil" - "github.com/grafana/dskit/httpgrpc" "github.com/grafana/dskit/instrument" ) -func observe(ctx context.Context, hist *prometheus.HistogramVec, method string, err error, duration time.Duration) { - respStatus := "success" - if err != nil { - if errResp, ok := httpgrpc.HTTPResponseFromError(err); ok { - respStatus = strconv.Itoa(int(errResp.Code)) - } else if grpcutil.IsCanceled(err) { - respStatus = "cancel" - } else { - respStatus = "error" - } - } - instrument.ObserveWithExemplar(ctx, hist.WithLabelValues(gRPC, method, respStatus, "false"), duration.Seconds()) +func observe(ctx context.Context, hist *prometheus.HistogramVec, method string, err error, duration time.Duration, instrumentLabel instrumentationLabel) { + instrument.ObserveWithExemplar(ctx, hist.WithLabelValues(gRPC, method, instrumentLabel.getInstrumentationLabel(err), "false"), duration.Seconds()) } // UnaryServerInstrumentInterceptor instruments gRPC requests for errors and latency. -func UnaryServerInstrumentInterceptor(hist *prometheus.HistogramVec) grpc.UnaryServerInterceptor { +func UnaryServerInstrumentInterceptor(hist *prometheus.HistogramVec, instrumentationOptions ...InstrumentationOption) grpc.UnaryServerInterceptor { + instrumentationLabel := applyInstrumentationOptions(false, instrumentationOptions...) return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (interface{}, error) { begin := time.Now() resp, err := handler(ctx, req) - observe(ctx, hist, info.FullMethod, err, time.Since(begin)) + observe(ctx, hist, info.FullMethod, err, time.Since(begin), instrumentationLabel) return resp, err } } // StreamServerInstrumentInterceptor instruments gRPC requests for errors and latency. -func StreamServerInstrumentInterceptor(hist *prometheus.HistogramVec) grpc.StreamServerInterceptor { +func StreamServerInstrumentInterceptor(hist *prometheus.HistogramVec, instrumentationOptions ...InstrumentationOption) grpc.StreamServerInterceptor { + instrumentationLabel := applyInstrumentationOptions(false, instrumentationOptions...) return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { begin := time.Now() err := handler(srv, ss) - observe(ss.Context(), hist, info.FullMethod, err, time.Since(begin)) + observe(ss.Context(), hist, info.FullMethod, err, time.Since(begin), instrumentationLabel) return err } } // UnaryClientInstrumentInterceptor records duration of gRPC requests client side. -func UnaryClientInstrumentInterceptor(metric *prometheus.HistogramVec) grpc.UnaryClientInterceptor { +func UnaryClientInstrumentInterceptor(metric *prometheus.HistogramVec, instrumentationOptions ...InstrumentationOption) grpc.UnaryClientInterceptor { + // we enforce masking of HTTP statuses. + instrumentationLabel := applyInstrumentationOptions(true, instrumentationOptions...) 
return func(ctx context.Context, method string, req, resp interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { start := time.Now() err := invoker(ctx, method, req, resp, cc, opts...) - metric.WithLabelValues(method, errorCode(err)).Observe(time.Since(start).Seconds()) + metric.WithLabelValues(method, instrumentationLabel.getInstrumentationLabel(err)).Observe(time.Since(start).Seconds()) return err } } // StreamClientInstrumentInterceptor records duration of streaming gRPC requests client side. -func StreamClientInstrumentInterceptor(metric *prometheus.HistogramVec) grpc.StreamClientInterceptor { +func StreamClientInstrumentInterceptor(metric *prometheus.HistogramVec, instrumentationOptions ...InstrumentationOption) grpc.StreamClientInterceptor { + // we enforce masking of HTTP statuses. + instrumentationLabel := applyInstrumentationOptions(true, instrumentationOptions...) return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption, ) (grpc.ClientStream, error) { start := time.Now() stream, err := streamer(ctx, desc, cc, method, opts...) s := &instrumentedClientStream{ - metric: metric, - start: start, - method: method, - serverStreams: desc.ServerStreams, - finished: atomic.NewBool(false), - finishedChan: make(chan struct{}), - stream: stream, + metric: metric, + start: start, + method: method, + serverStreams: desc.ServerStreams, + finished: atomic.NewBool(false), + finishedChan: make(chan struct{}), + stream: stream, + instrumentationLabel: instrumentationLabel, } s.awaitCompletion(ctx) return s, err @@ -87,13 +85,14 @@ func StreamClientInstrumentInterceptor(metric *prometheus.HistogramVec) grpc.Str // This implementation is heavily inspired by github.com/opentracing-contrib/go-grpc's openTracingClientStream. type instrumentedClientStream struct { - metric *prometheus.HistogramVec - start time.Time - method string - serverStreams bool - finished *atomic.Bool - finishedChan chan struct{} - stream grpc.ClientStream + metric *prometheus.HistogramVec + start time.Time + method string + serverStreams bool + finished *atomic.Bool + finishedChan chan struct{} + stream grpc.ClientStream + instrumentationLabel instrumentationLabel } func (s *instrumentedClientStream) Trailer() metadata.MD { @@ -122,7 +121,7 @@ func (s *instrumentedClientStream) finish(err error) { close(s.finishedChan) - s.metric.WithLabelValues(s.method, errorCode(err)).Observe(time.Since(s.start).Seconds()) + s.metric.WithLabelValues(s.method, s.instrumentationLabel.getInstrumentationLabel(err)).Observe(time.Since(s.start).Seconds()) } func (s *instrumentedClientStream) SendMsg(m interface{}) error { @@ -173,18 +172,75 @@ func (s *instrumentedClientStream) CloseSend() error { return err } -// errorCode converts an error into an error code string. -func errorCode(err error) string { - if err == nil { - return "2xx" +type InstrumentationOption func(*instrumentationLabel) + +var ( + // ReportGRPCStatusOption is an InstrumentationOption that is used for enabling gRPC status codes to be used + // in instrumentation labels. 
+ ReportGRPCStatusOption InstrumentationOption = func(instrumentationLabel *instrumentationLabel) { + instrumentationLabel.reportGRPCStatus = true + } +) + +func applyInstrumentationOptions(maskHTTPStatuses bool, options ...InstrumentationOption) instrumentationLabel { + instrumentationLabel := instrumentationLabel{maskHTTPStatus: maskHTTPStatuses} + for _, opt := range options { + opt(&instrumentationLabel) + } + return instrumentationLabel +} + +type instrumentationLabel struct { + reportGRPCStatus bool + maskHTTPStatus bool +} + +// getInstrumentationLabel converts an error into an error code string by applying the configurations +// contained in this instrumentationLabel object. +func (i *instrumentationLabel) getInstrumentationLabel(err error) string { + statusCode := errorToStatusCode(err) + return i.statusCodeToString(statusCode) +} + +func (i *instrumentationLabel) statusCodeToString(statusCode codes.Code) string { + if isHTTPStatusCode(statusCode) { + statusFamily := int(statusCode / 100) + if i.maskHTTPStatus { + return strconv.Itoa(statusFamily) + "xx" + } + return strconv.Itoa(int(statusCode)) } - if errResp, ok := httpgrpc.HTTPResponseFromError(err); ok { - statusFamily := int(errResp.Code / 100) - return strconv.Itoa(statusFamily) + "xx" - } else if grpcutil.IsCanceled(err) { + if i.reportGRPCStatus { + return statusCode.String() + } + + if statusCode == codes.OK { + if i.maskHTTPStatus { + return "2xx" + } + return "success" + } + + if statusCode == codes.Canceled { return "cancel" - } else { - return "error" } + + return "error" +} + +func errorToStatusCode(err error) codes.Code { + if err == nil { + return codes.OK + } + + if errors.Is(err, context.Canceled) { + return codes.Canceled + } + + return grpcutil.ErrorToStatusCode(err) +} + +func isHTTPStatusCode(statusCode codes.Code) bool { + return int(statusCode) >= 100 && int(statusCode) < 600 } diff --git a/vendor/github.com/grafana/dskit/middleware/grpc_logging.go b/vendor/github.com/grafana/dskit/middleware/grpc_logging.go index 7f5db7725c945..feab364743225 100644 --- a/vendor/github.com/grafana/dskit/middleware/grpc_logging.go +++ b/vendor/github.com/grafana/dskit/middleware/grpc_logging.go @@ -29,6 +29,12 @@ type OptionalLogging interface { ShouldLog(ctx context.Context, duration time.Duration) bool } +type DoNotLogError struct{ Err error } + +func (i DoNotLogError) Error() string { return i.Err.Error() } +func (i DoNotLogError) Unwrap() error { return i.Err } +func (i DoNotLogError) ShouldLog(_ context.Context, _ time.Duration) bool { return false } + // GRPCServerLog logs grpc requests, errors, and latency. type GRPCServerLog struct { Log log.Logger diff --git a/vendor/github.com/grafana/dskit/middleware/zero_response.go b/vendor/github.com/grafana/dskit/middleware/zero_response.go new file mode 100644 index 0000000000000..1bb4ecc8d1f6b --- /dev/null +++ b/vendor/github.com/grafana/dskit/middleware/zero_response.go @@ -0,0 +1,132 @@ +package middleware + +import ( + "errors" + "net" + "os" + "regexp" + "strconv" + "sync" + + "github.com/go-kit/log" + "go.uber.org/atomic" +) + +// NewZeroResponseListener returns a Listener that logs all connections that encountered io timeout on reads, and were closed before sending any response. 
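+//
+// A hypothetical wiring (a sketch, not part of this change; the listen address,
+// handler and logger are assumptions):
+//
+//	ln, _ := net.Listen("tcp", ":8080")
+//	srv := &http.Server{Handler: handler, ReadTimeout: 30 * time.Second}
+//	_ = srv.Serve(NewZeroResponseListener(ln, level.Warn(logger)))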
+func NewZeroResponseListener(list net.Listener, log log.Logger) net.Listener { + return &zeroResponseListener{ + Listener: list, + log: log, + bufPool: sync.Pool{ + New: func() interface{} { return &bufHolder{buf: make([]byte, 0, requestBufSize)} }, + }, + } +} + +// Wrap a slice in a struct, so we can store a pointer in sync.Pool +type bufHolder struct { + buf []byte +} + +// Size of buffer for read data. We log this eventually. +const requestBufSize = 512 + +type zeroResponseListener struct { + net.Listener + log log.Logger + bufPool sync.Pool // pool of &bufHolder. +} + +func (zl *zeroResponseListener) Accept() (net.Conn, error) { + conn, err := zl.Listener.Accept() + if err != nil { + return nil, err + } + bh := zl.bufPool.Get().(*bufHolder) + bh.buf = bh.buf[:0] + return &zeroResponseConn{Conn: conn, log: zl.log, bufHolder: bh, returnPool: &zl.bufPool}, nil +} + +type zeroResponseConn struct { + net.Conn + + log log.Logger + once sync.Once + returnPool *sync.Pool + + bufHolderMux sync.Mutex + bufHolder *bufHolder // Buffer with first requestBufSize bytes from connection. Set to nil as soon as data is written to the connection. + + lastReadErrIsDeadlineExceeded atomic.Bool +} + +func (zc *zeroResponseConn) Read(b []byte) (n int, err error) { + n, err = zc.Conn.Read(b) + if err != nil && errors.Is(err, os.ErrDeadlineExceeded) { + zc.lastReadErrIsDeadlineExceeded.Store(true) + } else { + zc.lastReadErrIsDeadlineExceeded.Store(false) + } + + // Store first requestBufSize read bytes on connection into the buffer for logging. + if n > 0 { + zc.bufHolderMux.Lock() + defer zc.bufHolderMux.Unlock() + + if zc.bufHolder != nil { + rem := requestBufSize - len(zc.bufHolder.buf) // how much space is in our buffer. + if rem > n { + rem = n + } + if rem > 0 { + zc.bufHolder.buf = append(zc.bufHolder.buf, b[:rem]...) + } + } + } + return +} + +func (zc *zeroResponseConn) Write(b []byte) (n int, err error) { + n, err = zc.Conn.Write(b) + if n > 0 { + zc.bufHolderMux.Lock() + if zc.bufHolder != nil { + zc.returnPool.Put(zc.bufHolder) + zc.bufHolder = nil + } + zc.bufHolderMux.Unlock() + } + return +} + +var authRegexp = regexp.MustCompile(`((?i)\r\nauthorization:\s+)(\S+\s+)(\S+)`) + +func (zc *zeroResponseConn) Close() error { + err := zc.Conn.Close() + + zc.once.Do(func() { + zc.bufHolderMux.Lock() + defer zc.bufHolderMux.Unlock() + + // If buffer was already returned, it means there was some data written on the connection, nothing to do. + if zc.bufHolder == nil { + return + } + + // If we didn't write anything to this connection, and we've got timeout while reading data, it looks like + // slow a slow client failing to send a request to us. + if !zc.lastReadErrIsDeadlineExceeded.Load() { + return + } + + b := zc.bufHolder.buf + b = authRegexp.ReplaceAll(b, []byte("${1}${2}***")) // Replace value in Authorization header with ***. 
+ + _ = zc.log.Log("msg", "read timeout, connection closed with no response", "read", strconv.Quote(string(b)), "remote", zc.RemoteAddr().String()) + + zc.returnPool.Put(zc.bufHolder) + zc.bufHolder = nil + }) + + return err +} diff --git a/vendor/github.com/grafana/dskit/modules/module_service.go b/vendor/github.com/grafana/dskit/modules/module_service.go index 8ca4e25714de4..a0fcdb876fcde 100644 --- a/vendor/github.com/grafana/dskit/modules/module_service.go +++ b/vendor/github.com/grafana/dskit/modules/module_service.go @@ -79,13 +79,19 @@ func (w *moduleService) start(serviceContext context.Context) error { // we don't want to let this service to stop until all dependant services are stopped, // so we use independent context here - level.Info(w.logger).Log("msg", "initialising", "module", w.name) + level.Info(w.logger).Log("msg", "starting", "module", w.name) err := w.service.StartAsync(context.Background()) if err != nil { return errors.Wrapf(err, "error starting module: %s", w.name) } - return w.service.AwaitRunning(serviceContext) + err = w.service.AwaitRunning(serviceContext) + if err != nil { + // Make sure that underlying service is stopped before returning + // (e.g. in case of context cancellation, AwaitRunning returns early, but service may still be starting). + _ = services.StopAndAwaitTerminated(context.Background(), w.service) + } + return errors.Wrapf(err, "starting module %s", w.name) } func (w *moduleService) run(serviceContext context.Context) error { diff --git a/vendor/github.com/grafana/dskit/ring/batch.go b/vendor/github.com/grafana/dskit/ring/batch.go index fa627445ed2c6..5acd8fd008620 100644 --- a/vendor/github.com/grafana/dskit/ring/batch.go +++ b/vendor/github.com/grafana/dskit/ring/batch.go @@ -8,7 +8,8 @@ import ( "sync" "go.uber.org/atomic" - "google.golang.org/grpc/status" + + grpcUtils "github.com/grafana/dskit/grpcutil" ) type batchTracker struct { @@ -25,40 +26,79 @@ type instance struct { } type itemTracker struct { - minSuccess int - maxFailures int - succeeded atomic.Int32 - failed4xx atomic.Int32 - failed5xx atomic.Int32 - remaining atomic.Int32 - err atomic.Error + minSuccess int + maxFailures int + succeeded atomic.Int32 + failedClient atomic.Int32 + failedServer atomic.Int32 + remaining atomic.Int32 + err atomic.Error } -func (i *itemTracker) recordError(err error) int32 { +func (i *itemTracker) recordError(err error, isClientError func(error) bool) int32 { i.err.Store(err) - if s, ok := status.FromError(err); ok && s.Code()/100 == 4 { - return i.failed4xx.Inc() + if isClientError(err) { + return i.failedClient.Inc() } + return i.failedServer.Inc() +} - return i.failed5xx.Inc() +func isHTTPStatus4xx(err error) bool { + code := grpcUtils.ErrorToStatusCode(err) + return code/100 == 4 } -// DoBatch request against a set of keys in the ring, handling replication and -// failures. For example if we want to write N items where they may all -// hit different instances, and we want them all replicated R ways with -// quorum writes, we track the relationship between batch RPCs and the items -// within them. -// -// Callback is passed the instance to target, and the indexes of the keys -// to send to that instance. +// DoBatch is a deprecated version of DoBatchWithOptions where grpc errors containing status codes 4xx are treated as client errors. +// Deprecated. Use DoBatchWithOptions instead. 
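+//
+// As a sketch (argument names assumed), a call of the form
+//
+//	DoBatch(ctx, op, r, keys, callback, cleanup)
+//
+// is equivalent to
+//
+//	DoBatchWithOptions(ctx, op, r, keys, callback, DoBatchOptions{Cleanup: cleanup})
+//
+// because a nil IsClientError falls back to the same 4xx-based classification.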
+func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error { + return DoBatchWithOptions(ctx, op, r, keys, callback, DoBatchOptions{ + Cleanup: cleanup, + IsClientError: isHTTPStatus4xx, + }) +} + +// DoBatchOptions defines options for the DoBatchWithOptions call. +// Zero value options are valid, as well as individual zero valued fields. +type DoBatchOptions struct { + // Cleanup is always called, either on an error before starting the batches or after they are all finished. + // If nil, a noop will be called. + Cleanup func() + + // IsClientError classifies errors returned by `callback()` into client or server errors. + // See `batchTracker.record()` function for details about how errors are combined into final error returned by DoBatchWithClientError. + // If nil, a default implementation is used that classifies grpc errors containing status codes 4xx as client errors. + IsClientError func(error) bool + + // Go will be used to spawn the callback goroutines, and can be used to use a worker pool like concurrency.ReusableGoroutinesPool. + Go func(func()) +} + +func (o *DoBatchOptions) replaceZeroValuesWithDefaults() { + if o.Cleanup == nil { + o.Cleanup = func() {} + } + if o.IsClientError == nil { + o.IsClientError = isHTTPStatus4xx + } + if o.Go == nil { + o.Go = func(f func()) { go f() } + } +} + +// DoBatchWithOptions request against a set of keys in the ring, handling replication and failures. +// For example if we want to write N items where they may all hit different instances, +// and we want them all replicated R ways with quorum writes, +// we track the relationship between batch RPCs and the items within them. // -// cleanup() is always called, either on an error before starting the batches or after they all finish. +// See comments on DoBatchOptions for available options for this call. // -// Not implemented as a method on Ring so we can test separately. -func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, cleanup func()) error { +// Not implemented as a method on Ring, so we can test separately. +func DoBatchWithOptions(ctx context.Context, op Operation, r ReadRing, keys []uint32, callback func(InstanceDesc, []int) error, o DoBatchOptions) error { + o.replaceZeroValuesWithDefaults() + if r.InstancesCount() <= 0 { - cleanup() + o.Cleanup() return fmt.Errorf("DoBatch: InstancesCount <= 0") } expectedTrackers := len(keys) * (r.ReplicationFactor() + 1) / r.InstancesCount() @@ -73,7 +113,7 @@ func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callb for i, key := range keys { replicationSet, err := r.Get(key, op, bufDescs[:0], bufHosts[:0], bufZones[:0]) if err != nil { - cleanup() + o.Cleanup() return err } itemTrackers[i].minSuccess = len(replicationSet.Instances) - replicationSet.MaxErrors @@ -104,19 +144,19 @@ func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callb wg.Add(len(instances)) for _, i := range instances { - go func(i instance) { + i := i + o.Go(func() { err := callback(i.desc, i.indexes) - tracker.record(i.itemTrackers, err) + tracker.record(i.itemTrackers, err, o.IsClientError) wg.Done() - }(i) + }) } // Perform cleanup at the end. 
- go func() { + o.Go(func() { wg.Wait() - - cleanup() - }() + o.Cleanup() + }) select { case err := <-tracker.err: @@ -128,35 +168,36 @@ func DoBatch(ctx context.Context, op Operation, r ReadRing, keys []uint32, callb } } -func (b *batchTracker) record(itemTrackers []*itemTracker, err error) { +func (b *batchTracker) record(itemTrackers []*itemTracker, err error, isClientError func(error) bool) { // If we reach the required number of successful puts on this item, then decrement the // number of pending items by one. // // The use of atomic increments here is needed as: // * rpcsPending and rpcsFailed guarantee only a single goroutine will write to either channel - // * succeeded, failed4xx, failed5xx and remaining guarantee that the "return decision" is made atomically + // * succeeded, failedClient, failedServer and remaining guarantee that the "return decision" is made atomically // avoiding race condition - for i := range itemTrackers { + for _, it := range itemTrackers { if err != nil { // Track the number of errors by error family, and if it exceeds maxFailures // shortcut the waiting rpc. - errCount := itemTrackers[i].recordError(err) + errCount := it.recordError(err, isClientError) // We should return an error if we reach the maxFailure (quorum) on a given error family OR - // we don't have any remaining instances to try. + // we don't have any remaining instances to try. In the following we use ClientError and ServerError + // to denote errors, for which isClientError() returns true and false respectively. // - // Ex: 2xx, 4xx, 5xx -> return 5xx - // Ex: 4xx, 4xx, _ -> return 4xx - // Ex: 5xx, _, 5xx -> return 5xx + // Ex: Success, ClientError, ServerError -> return ServerError + // Ex: ClientError, ClientError, Success -> return ClientError + // Ex: ServerError, Success, ServerError -> return ServerError // - // The reason for searching for quorum in 4xx and 5xx errors separately is to give a more accurate - // response to the initial request. So if a quorum of instances rejects the request with 4xx, then the request should be rejected - // even if less-than-quorum instances indicated a failure to process the request (via 5xx). + // The reason for searching for quorum in ClientError and ServerError errors separately is to give a more accurate + // response to the initial request. So if a quorum of instances rejects the request with ClientError, then the request should be rejected + // even if less-than-quorum instances indicated a failure to process the request (via ServerError). // The speculation is that had the unavailable instances been available, - // they would have rejected the request with a 4xx as well. - // Conversely, if a quorum of instances failed to process the request via 5xx and less-than-quorum - // instances rejected it with 4xx, then we do not have quorum to reject the request as a 4xx. Instead, - // we return the last 5xx error for debuggability. - if errCount > int32(itemTrackers[i].maxFailures) || itemTrackers[i].remaining.Dec() == 0 { + // they would have rejected the request with a ClientError as well. + // Conversely, if a quorum of instances failed to process the request via ServerError and less-than-quorum + // instances rejected it with ClientError, then we do not have quorum to reject the request as a ClientError. Instead, + // we return the last ServerError error for debuggability. 
+ if errCount > int32(it.maxFailures) || it.remaining.Dec() == 0 { if b.rpcsFailed.Inc() == 1 { b.err <- err } @@ -164,7 +205,8 @@ func (b *batchTracker) record(itemTrackers []*itemTracker, err error) { } else { // If we successfully process items in minSuccess instances, // then wake up the waiting rpc, so it can return early. - if itemTrackers[i].succeeded.Inc() >= int32(itemTrackers[i].minSuccess) { + succeeded := it.succeeded.Inc() + if succeeded == int32(it.minSuccess) { if b.rpcsPending.Dec() == 0 { b.done <- struct{}{} } @@ -172,11 +214,12 @@ func (b *batchTracker) record(itemTrackers []*itemTracker, err error) { } // If we successfully called this particular instance, but we don't have any remaining instances to try, - // and we failed to call minSuccess instances, then we need to return the last error - // Ex: 4xx, 5xx, 2xx - if itemTrackers[i].remaining.Dec() == 0 { - if b.rpcsFailed.Inc() == 1 { - b.err <- itemTrackers[i].err.Load() + // and we failed to call minSuccess instances, then we need to return the last error. + if succeeded < int32(it.minSuccess) { + if it.remaining.Dec() == 0 { + if b.rpcsFailed.Inc() == 1 { + b.err <- it.err.Load() + } } } } diff --git a/vendor/github.com/grafana/dskit/ring/replication_set.go b/vendor/github.com/grafana/dskit/ring/replication_set.go index cc43331e44d95..f389f4766fc55 100644 --- a/vendor/github.com/grafana/dskit/ring/replication_set.go +++ b/vendor/github.com/grafana/dskit/ring/replication_set.go @@ -9,6 +9,7 @@ import ( kitlog "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/opentracing/opentracing-go/ext" "github.com/grafana/dskit/spanlogger" ) @@ -294,7 +295,7 @@ func DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Contex terminate := func(err error) ([]T, error) { if cfg.Logger != nil { - _ = cfg.Logger.Error(err) + ext.Error.Set(cfg.Logger.Span, true) } contextTracker.cancelAllContexts() @@ -325,7 +326,7 @@ func DoUntilQuorumWithoutSuccessfulContextCancellation[T any](ctx context.Contex resultsRemaining-- if result.err != nil && cfg.IsTerminalError != nil && cfg.IsTerminalError(result.err) { - level.Error(logger).Log("msg", "cancelling all outstanding requests because a terminal error occurred", "err", result.err) + level.Warn(logger).Log("msg", "cancelling all outstanding requests because a terminal error occurred", "err", result.err) // We must return before calling resultTracker.done() below, otherwise done() might start further requests if request minimisation is enabled. return terminate(result.err) } diff --git a/vendor/github.com/grafana/dskit/ring/util.go b/vendor/github.com/grafana/dskit/ring/util.go index b5ee485ef25c6..a21c0f2fe2cad 100644 --- a/vendor/github.com/grafana/dskit/ring/util.go +++ b/vendor/github.com/grafana/dskit/ring/util.go @@ -7,6 +7,7 @@ import ( "time" "github.com/go-kit/log" + "golang.org/x/exp/slices" "github.com/grafana/dskit/backoff" "github.com/grafana/dskit/netutil" @@ -127,9 +128,11 @@ func getZones(tokens map[string][]uint32) []string { // searchToken returns the offset of the tokens entry holding the range for the provided key. 
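// For example (illustrative values): with tokens [10, 20, 30], key 20 maps to the
// entry holding 30, since we want the first token strictly greater than the key,
// while key 35 wraps around to the entry holding 10.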
func searchToken(tokens []uint32, key uint32) int { - i := sort.Search(len(tokens), func(x int) bool { - return tokens[x] > key - }) + i, found := slices.BinarySearch(tokens, key) + if found { + // we want the first token > key, not >= key + i = i + 1 + } if i >= len(tokens) { i = 0 } diff --git a/vendor/github.com/grafana/dskit/server/limits.go b/vendor/github.com/grafana/dskit/server/limits.go index 6b18bb1cb0c2c..4a8651e323abc 100644 --- a/vendor/github.com/grafana/dskit/server/limits.go +++ b/vendor/github.com/grafana/dskit/server/limits.go @@ -4,6 +4,7 @@ import ( "context" "strings" + "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" "google.golang.org/grpc/tap" ) @@ -11,19 +12,15 @@ import ( type GrpcInflightMethodLimiter interface { // RPCCallStarting is called before request has been read into memory. // All that's known about the request at this point is grpc method name. + // + // Returned context is used during the remainder of the gRPC call. + // // Returned error should be convertible to gRPC Status via status.FromError, // otherwise gRPC-server implementation-specific error will be returned to the client (codes.PermissionDenied in grpc@v1.55.0). - RPCCallStarting(methodName string) error - RPCCallFinished(methodName string) -} - -// Custom type to hide it from other packages. -type grpcLimitCheckContextKey int + RPCCallStarting(ctx context.Context, methodName string, md metadata.MD) (context.Context, error) -// Presence of this key in the context indicates that inflight request counter was increased for this request, and needs to be decreased when request ends. -const ( - requestFullMethod grpcLimitCheckContextKey = 1 -) + RPCCallFinished(ctx context.Context) +} func newGrpcInflightLimitCheck(methodLimiter GrpcInflightMethodLimiter) *grpcInflightLimitCheck { return &grpcInflightLimitCheck{ @@ -38,8 +35,8 @@ type grpcInflightLimitCheck struct { } // TapHandle is called after receiving grpc request and headers, but before reading any request data yet. -// If we reject request here, it won't be counted towards any metrics (eg. in middleware.grpcStatsHandler). -// If we accept request (not return error), eventually HandleRPC with stats.End notification will be called. +// If we reject request here (by returning non-nil error), it won't be counted towards any metrics (eg. in middleware.grpcStatsHandler). +// If we accept request (no error), eventually HandleRPC with stats.End notification will be called. func (g *grpcInflightLimitCheck) TapHandle(ctx context.Context, info *tap.Info) (context.Context, error) { if !isMethodNameValid(info.FullMethodName) { // If method name is not valid, we let the request continue, but not call method limiter. 
@@ -47,12 +44,7 @@ func (g *grpcInflightLimitCheck) TapHandle(ctx context.Context, info *tap.Info) return ctx, nil } - if err := g.methodLimiter.RPCCallStarting(info.FullMethodName); err != nil { - return ctx, err - } - - ctx = context.WithValue(ctx, requestFullMethod, info.FullMethodName) - return ctx, nil + return g.methodLimiter.RPCCallStarting(ctx, info.FullMethodName, info.Header) } func (g *grpcInflightLimitCheck) TagRPC(ctx context.Context, _ *stats.RPCTagInfo) context.Context { @@ -65,9 +57,7 @@ func (g *grpcInflightLimitCheck) HandleRPC(ctx context.Context, rpcStats stats.R return } - if name, ok := ctx.Value(requestFullMethod).(string); ok { - g.methodLimiter.RPCCallFinished(name) - } + g.methodLimiter.RPCCallFinished(ctx) } func (g *grpcInflightLimitCheck) TagConn(ctx context.Context, _ *stats.ConnTagInfo) context.Context { diff --git a/vendor/github.com/grafana/dskit/server/server.go b/vendor/github.com/grafana/dskit/server/server.go index 9e65b01053809..2b54283df7f21 100644 --- a/vendor/github.com/grafana/dskit/server/server.go +++ b/vendor/github.com/grafana/dskit/server/server.go @@ -92,15 +92,19 @@ type Config struct { HTTPTLSConfig TLSConfig `yaml:"http_tls_config"` GRPCTLSConfig TLSConfig `yaml:"grpc_tls_config"` - RegisterInstrumentation bool `yaml:"register_instrumentation"` - ExcludeRequestInLog bool `yaml:"-"` - DisableRequestSuccessLog bool `yaml:"-"` + RegisterInstrumentation bool `yaml:"register_instrumentation"` + ReportGRPCCodesInInstrumentationLabel bool `yaml:"report_grpc_codes_in_instrumentation_label_enabled"` + ExcludeRequestInLog bool `yaml:"-"` + DisableRequestSuccessLog bool `yaml:"-"` ServerGracefulShutdownTimeout time.Duration `yaml:"graceful_shutdown_timeout"` HTTPServerReadTimeout time.Duration `yaml:"http_server_read_timeout"` + HTTPServerReadHeaderTimeout time.Duration `yaml:"http_server_read_header_timeout"` HTTPServerWriteTimeout time.Duration `yaml:"http_server_write_timeout"` HTTPServerIdleTimeout time.Duration `yaml:"http_server_idle_timeout"` + HTTPLogClosedConnectionsWithoutResponse bool `yaml:"http_log_closed_connections_without_response_enabled"` + GRPCOptions []grpc.ServerOption `yaml:"-"` GRPCMiddleware []grpc.UnaryServerInterceptor `yaml:"-"` GRPCStreamMiddleware []grpc.StreamServerInterceptor `yaml:"-"` @@ -109,9 +113,9 @@ type Config struct { DoNotAddDefaultHTTPMiddleware bool `yaml:"-"` RouteHTTPToGRPC bool `yaml:"-"` - GPRCServerMaxRecvMsgSize int `yaml:"grpc_server_max_recv_msg_size"` + GRPCServerMaxRecvMsgSize int `yaml:"grpc_server_max_recv_msg_size"` GRPCServerMaxSendMsgSize int `yaml:"grpc_server_max_send_msg_size"` - GPRCServerMaxConcurrentStreams uint `yaml:"grpc_server_max_concurrent_streams"` + GRPCServerMaxConcurrentStreams uint `yaml:"grpc_server_max_concurrent_streams"` GRPCServerMaxConnectionIdle time.Duration `yaml:"grpc_server_max_connection_idle"` GRPCServerMaxConnectionAge time.Duration `yaml:"grpc_server_max_connection_age"` GRPCServerMaxConnectionAgeGrace time.Duration `yaml:"grpc_server_max_connection_age_grace"` @@ -167,13 +171,16 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.GRPCListenPort, "server.grpc-listen-port", 9095, "gRPC server listen port.") f.IntVar(&cfg.GRPCConnLimit, "server.grpc-conn-limit", 0, "Maximum number of simultaneous grpc connections, <=0 to disable") f.BoolVar(&cfg.RegisterInstrumentation, "server.register-instrumentation", true, "Register the intrumentation handlers (/metrics etc).") + f.BoolVar(&cfg.ReportGRPCCodesInInstrumentationLabel, 
"server.report-grpc-codes-in-instrumentation-label-enabled", false, "If set to true, gRPC statuses will be reported in instrumentation labels with their string representations. Otherwise, they will be reported as \"error\".") f.DurationVar(&cfg.ServerGracefulShutdownTimeout, "server.graceful-shutdown-timeout", 30*time.Second, "Timeout for graceful shutdowns") - f.DurationVar(&cfg.HTTPServerReadTimeout, "server.http-read-timeout", 30*time.Second, "Read timeout for HTTP server") + f.DurationVar(&cfg.HTTPServerReadTimeout, "server.http-read-timeout", 30*time.Second, "Read timeout for entire HTTP request, including headers and body.") + f.DurationVar(&cfg.HTTPServerReadHeaderTimeout, "server.http-read-header-timeout", 0, "Read timeout for HTTP request headers. If set to 0, value of -server.http-read-timeout is used.") f.DurationVar(&cfg.HTTPServerWriteTimeout, "server.http-write-timeout", 30*time.Second, "Write timeout for HTTP server") f.DurationVar(&cfg.HTTPServerIdleTimeout, "server.http-idle-timeout", 120*time.Second, "Idle timeout for HTTP server") - f.IntVar(&cfg.GPRCServerMaxRecvMsgSize, "server.grpc-max-recv-msg-size-bytes", 4*1024*1024, "Limit on the size of a gRPC message this server can receive (bytes).") + f.BoolVar(&cfg.HTTPLogClosedConnectionsWithoutResponse, "server.http-log-closed-connections-without-response-enabled", false, "Log closed connections that did not receive any response, most likely because client didn't send any request within timeout.") + f.IntVar(&cfg.GRPCServerMaxRecvMsgSize, "server.grpc-max-recv-msg-size-bytes", 4*1024*1024, "Limit on the size of a gRPC message this server can receive (bytes).") f.IntVar(&cfg.GRPCServerMaxSendMsgSize, "server.grpc-max-send-msg-size-bytes", 4*1024*1024, "Limit on the size of a gRPC message this server can send (bytes).") - f.UintVar(&cfg.GPRCServerMaxConcurrentStreams, "server.grpc-max-concurrent-streams", 100, "Limit on the number of concurrent streams for gRPC calls per client connection (0 = unlimited)") + f.UintVar(&cfg.GRPCServerMaxConcurrentStreams, "server.grpc-max-concurrent-streams", 100, "Limit on the number of concurrent streams for gRPC calls per client connection (0 = unlimited)") f.DurationVar(&cfg.GRPCServerMaxConnectionIdle, "server.grpc.keepalive.max-connection-idle", infinty, "The duration after which an idle connection should be closed. Default: infinity") f.DurationVar(&cfg.GRPCServerMaxConnectionAge, "server.grpc.keepalive.max-connection-age", infinty, "The duration for the maximum amount of time a connection may exist before it will be closed. Default: infinity") f.DurationVar(&cfg.GRPCServerMaxConnectionAgeGrace, "server.grpc.keepalive.max-connection-age-grace", infinty, "An additive period after max-connection-age after which the connection will be forcibly closed. 
Default: infinity") @@ -259,6 +266,9 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { return nil, err } httpListener = middleware.CountingListener(httpListener, metrics.TCPConnections.WithLabelValues("http")) + if cfg.HTTPLogClosedConnectionsWithoutResponse { + httpListener = middleware.NewZeroResponseListener(httpListener, level.Warn(logger)) + } metrics.TCPConnectionsLimit.WithLabelValues("http").Set(float64(cfg.HTTPConnLimit)) if cfg.HTTPConnLimit > 0 { @@ -346,17 +356,21 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { WithRequest: !cfg.ExcludeRequestInLog, DisableRequestSuccessLog: cfg.DisableRequestSuccessLog, } + var reportGRPCStatusesOptions []middleware.InstrumentationOption + if cfg.ReportGRPCCodesInInstrumentationLabel { + reportGRPCStatusesOptions = []middleware.InstrumentationOption{middleware.ReportGRPCStatusOption} + } grpcMiddleware := []grpc.UnaryServerInterceptor{ serverLog.UnaryServerInterceptor, otgrpc.OpenTracingServerInterceptor(opentracing.GlobalTracer()), - middleware.UnaryServerInstrumentInterceptor(metrics.RequestDuration), + middleware.UnaryServerInstrumentInterceptor(metrics.RequestDuration, reportGRPCStatusesOptions...), } grpcMiddleware = append(grpcMiddleware, cfg.GRPCMiddleware...) grpcStreamMiddleware := []grpc.StreamServerInterceptor{ serverLog.StreamServerInterceptor, otgrpc.OpenTracingStreamServerInterceptor(opentracing.GlobalTracer()), - middleware.StreamServerInstrumentInterceptor(metrics.RequestDuration), + middleware.StreamServerInstrumentInterceptor(metrics.RequestDuration, reportGRPCStatusesOptions...), } grpcStreamMiddleware = append(grpcStreamMiddleware, cfg.GRPCStreamMiddleware...) @@ -378,9 +392,9 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { grpc.ChainStreamInterceptor(grpcStreamMiddleware...), grpc.KeepaliveParams(grpcKeepAliveOptions), grpc.KeepaliveEnforcementPolicy(grpcKeepAliveEnforcementPolicy), - grpc.MaxRecvMsgSize(cfg.GPRCServerMaxRecvMsgSize), + grpc.MaxRecvMsgSize(cfg.GRPCServerMaxRecvMsgSize), grpc.MaxSendMsgSize(cfg.GRPCServerMaxSendMsgSize), - grpc.MaxConcurrentStreams(uint32(cfg.GPRCServerMaxConcurrentStreams)), + grpc.MaxConcurrentStreams(uint32(cfg.GRPCServerMaxConcurrentStreams)), grpc.NumStreamWorkers(uint32(cfg.GRPCServerNumWorkers)), } @@ -457,10 +471,11 @@ func newServer(cfg Config, metrics *Metrics) (*Server, error) { } httpServer := &http.Server{ - ReadTimeout: cfg.HTTPServerReadTimeout, - WriteTimeout: cfg.HTTPServerWriteTimeout, - IdleTimeout: cfg.HTTPServerIdleTimeout, - Handler: middleware.Merge(httpMiddleware...).Wrap(router), + ReadTimeout: cfg.HTTPServerReadTimeout, + ReadHeaderTimeout: cfg.HTTPServerReadHeaderTimeout, + WriteTimeout: cfg.HTTPServerWriteTimeout, + IdleTimeout: cfg.HTTPServerIdleTimeout, + Handler: middleware.Merge(httpMiddleware...).Wrap(router), } if httpTLSConfig != nil { httpServer.TLSConfig = httpTLSConfig diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen1.go b/vendor/golang.org/x/oauth2/google/appengine_gen1.go index 16c6c6b90ce50..e61587945b08f 100644 --- a/vendor/golang.org/x/oauth2/google/appengine_gen1.go +++ b/vendor/golang.org/x/oauth2/google/appengine_gen1.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build appengine -// +build appengine // This file applies to App Engine first generation runtimes (<= Go 1.9). 
diff --git a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go index a7e27b3d2991c..9c79aa0a0cc5d 100644 --- a/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go +++ b/vendor/golang.org/x/oauth2/google/appengine_gen2_flex.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build !appengine -// +build !appengine // This file applies to App Engine second generation runtimes (>= Go 1.11) and App Engine flexible. diff --git a/vendor/golang.org/x/oauth2/internal/client_appengine.go b/vendor/golang.org/x/oauth2/internal/client_appengine.go index e1755d1d9acf4..d28140f789ec9 100644 --- a/vendor/golang.org/x/oauth2/internal/client_appengine.go +++ b/vendor/golang.org/x/oauth2/internal/client_appengine.go @@ -3,7 +3,6 @@ // license that can be found in the LICENSE file. //go:build appengine -// +build appengine package internal diff --git a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go index dbe2e2d0c6579..6ce01ac9a69c7 100644 --- a/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/api/annotations/field_behavior.pb.go @@ -15,7 +15,7 @@ // Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.26.0 -// protoc v3.21.9 +// protoc v3.21.12 // source: google/api/field_behavior.proto package annotations @@ -78,6 +78,19 @@ const ( // a non-empty value will be returned. The user will not be aware of what // non-empty value to expect. FieldBehavior_NON_EMPTY_DEFAULT FieldBehavior = 7 + // Denotes that the field in a resource (a message annotated with + // google.api.resource) is used in the resource name to uniquely identify the + // resource. For AIP-compliant APIs, this should only be applied to the + // `name` field on the resource. + // + // This behavior should not be applied to references to other resources within + // the message. + // + // The identifier field of resources often have different field behavior + // depending on the request it is embedded in (e.g. for Create methods name + // is optional and unused, while for Update methods it is required). Instead + // of method-specific annotations, only `IDENTIFIER` is required. + FieldBehavior_IDENTIFIER FieldBehavior = 8 ) // Enum value maps for FieldBehavior. 
@@ -91,6 +104,7 @@ var ( 5: "IMMUTABLE", 6: "UNORDERED_LIST", 7: "NON_EMPTY_DEFAULT", + 8: "IDENTIFIER", } FieldBehavior_value = map[string]int32{ "FIELD_BEHAVIOR_UNSPECIFIED": 0, @@ -101,6 +115,7 @@ var ( "IMMUTABLE": 5, "UNORDERED_LIST": 6, "NON_EMPTY_DEFAULT": 7, + "IDENTIFIER": 8, } ) @@ -169,7 +184,7 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{ 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2a, - 0xa6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, + 0xb6, 0x01, 0x0a, 0x0d, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x42, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1e, 0x0a, 0x1a, 0x46, 0x49, 0x45, 0x4c, 0x44, 0x5f, 0x42, 0x45, 0x48, 0x41, 0x56, 0x49, 0x4f, 0x52, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x4f, 0x50, 0x54, 0x49, 0x4f, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, @@ -179,7 +194,8 @@ var file_google_api_field_behavior_proto_rawDesc = []byte{ 0x0a, 0x09, 0x49, 0x4d, 0x4d, 0x55, 0x54, 0x41, 0x42, 0x4c, 0x45, 0x10, 0x05, 0x12, 0x12, 0x0a, 0x0e, 0x55, 0x4e, 0x4f, 0x52, 0x44, 0x45, 0x52, 0x45, 0x44, 0x5f, 0x4c, 0x49, 0x53, 0x54, 0x10, 0x06, 0x12, 0x15, 0x0a, 0x11, 0x4e, 0x4f, 0x4e, 0x5f, 0x45, 0x4d, 0x50, 0x54, 0x59, 0x5f, 0x44, - 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, + 0x45, 0x46, 0x41, 0x55, 0x4c, 0x54, 0x10, 0x07, 0x12, 0x0e, 0x0a, 0x0a, 0x49, 0x44, 0x45, 0x4e, + 0x54, 0x49, 0x46, 0x49, 0x45, 0x52, 0x10, 0x08, 0x3a, 0x60, 0x0a, 0x0e, 0x66, 0x69, 0x65, 0x6c, 0x64, 0x5f, 0x62, 0x65, 0x68, 0x61, 0x76, 0x69, 0x6f, 0x72, 0x12, 0x1d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x46, 0x69, 0x65, 0x6c, 0x64, 0x4f, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x9c, 0x08, 0x20, 0x03, 0x28, 0x0e, diff --git a/vendor/google.golang.org/grpc/README.md b/vendor/google.golang.org/grpc/README.md index 1bc92248cb470..ab0fbb79b863d 100644 --- a/vendor/google.golang.org/grpc/README.md +++ b/vendor/google.golang.org/grpc/README.md @@ -1,8 +1,8 @@ # gRPC-Go -[![Build Status](https://travis-ci.org/grpc/grpc-go.svg)](https://travis-ci.org/grpc/grpc-go) [![GoDoc](https://pkg.go.dev/badge/google.golang.org/grpc)][API] [![GoReportCard](https://goreportcard.com/badge/grpc/grpc-go)](https://goreportcard.com/report/github.com/grpc/grpc-go) +[![codecov](https://codecov.io/gh/grpc/grpc-go/graph/badge.svg)](https://codecov.io/gh/grpc/grpc-go) The [Go][] implementation of [gRPC][]: A high performance, open source, general RPC framework that puts mobile and HTTP/2 first. 
For more information see the diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 712fef4d0fb9d..52d530d7ad01c 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -121,9 +121,9 @@ func (a *Attributes) String() string { return sb.String() } -func str(x any) string { +func str(x any) (s string) { if v, ok := x.(fmt.Stringer); ok { - return v.String() + return fmt.Sprint(v) } else if v, ok := x.(string); ok { return v } diff --git a/vendor/google.golang.org/grpc/authz/audit/audit_logger.go b/vendor/google.golang.org/grpc/authz/audit/audit_logger.go index b9b7219703876..7ea79410ad743 100644 --- a/vendor/google.golang.org/grpc/authz/audit/audit_logger.go +++ b/vendor/google.golang.org/grpc/authz/audit/audit_logger.go @@ -89,9 +89,9 @@ type LoggerConfig interface { // decision meets the condition for audit, all the configured audit loggers' // Log() method will be invoked to log that event. // -// TODO(lwge): Change the link to the merged gRFC once it's ready. -// Please refer to https://github.com/grpc/proposal/pull/346 for more details -// about audit logging. +// Please refer to +// https://github.com/grpc/proposal/blob/master/A59-audit-logging.md for more +// details about audit logging. type Logger interface { // Log performs audit logging for the provided audit event. // @@ -107,9 +107,9 @@ type Logger interface { // implement this interface, along with the Logger interface, and register // it by calling RegisterLoggerBuilder() at init time. // -// TODO(lwge): Change the link to the merged gRFC once it's ready. -// Please refer to https://github.com/grpc/proposal/pull/346 for more details -// about audit logging. +// Please refer to +// https://github.com/grpc/proposal/blob/master/A59-audit-logging.md for more +// details about audit logging. type LoggerBuilder interface { // ParseLoggerConfig parses the given JSON bytes into a structured // logger config this builder can use to build an audit logger. diff --git a/vendor/google.golang.org/grpc/balancer/balancer.go b/vendor/google.golang.org/grpc/balancer/balancer.go index b6377f445ad24..d79560a2e268f 100644 --- a/vendor/google.golang.org/grpc/balancer/balancer.go +++ b/vendor/google.golang.org/grpc/balancer/balancer.go @@ -30,6 +30,7 @@ import ( "google.golang.org/grpc/channelz" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" @@ -39,6 +40,8 @@ import ( var ( // m is a map from name to balancer builder. m = make(map[string]Builder) + + logger = grpclog.Component("balancer") ) // Register registers the balancer builder to the balancer map. b.Name @@ -51,6 +54,12 @@ var ( // an init() function), and is not thread-safe. If multiple Balancers are // registered with the same name, the one registered last will take effect. func Register(b Builder) { + if strings.ToLower(b.Name()) != b.Name() { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer registered with name %q. 
grpc-go will be switching to case sensitive balancer registries soon", b.Name()) + } m[strings.ToLower(b.Name())] = b } @@ -70,6 +79,12 @@ func init() { // Note that the compare is done in a case-insensitive fashion. // If no builder is register with the name, nil will be returned. func Get(name string) Builder { + if strings.ToLower(name) != name { + // TODO: Skip the use of strings.ToLower() to index the map after v1.59 + // is released to switch to case sensitive balancer registry. Also, + // remove this warning and update the docstrings for Register and Get. + logger.Warningf("Balancer retrieved for name %q. grpc-go will be switching to case sensitive balancer registries soon", name) + } if b, ok := m[strings.ToLower(name)]; ok { return b } diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go index f2ddfc3788ed9..86ba65be4c004 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb.go @@ -32,14 +32,18 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/balancer" + "google.golang.org/grpc/balancer/base" grpclbstate "google.golang.org/grpc/balancer/grpclb/state" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/internal" "google.golang.org/grpc/internal/backoff" + internalgrpclog "google.golang.org/grpc/internal/grpclog" + "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/internal/resolver/dns" "google.golang.org/grpc/resolver" + "google.golang.org/grpc/resolver/manual" durationpb "github.com/golang/protobuf/ptypes/duration" lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" @@ -132,7 +136,11 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal // This generates a manual resolver builder with a fixed scheme. This // scheme will be used to dial to remote LB, so we can send filtered // address updates to remote LB ClientConn using this manual resolver. - r := &lbManualResolver{scheme: "grpclb-internal", ccb: cc} + mr := manual.NewBuilderWithScheme("grpclb-internal") + // ResolveNow() on this manual resolver is forwarded to the parent + // ClientConn, so when grpclb client loses contact with the remote balancer, + // the parent ClientConn's resolver will re-resolve. + mr.ResolveNowCallback = cc.ResolveNow lb := &lbBalancer{ cc: newLBCacheClientConn(cc), @@ -142,23 +150,24 @@ func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) bal fallbackTimeout: b.fallbackTimeout, doneCh: make(chan struct{}), - manualResolver: r, + manualResolver: mr, subConns: make(map[resolver.Address]balancer.SubConn), scStates: make(map[balancer.SubConn]connectivity.State), - picker: &errPicker{err: balancer.ErrNoSubConnAvailable}, + picker: base.NewErrPicker(balancer.ErrNoSubConnAvailable), clientStats: newRPCStats(), backoff: backoff.DefaultExponential, // TODO: make backoff configurable. 
} + lb.logger = internalgrpclog.NewPrefixLogger(logger, fmt.Sprintf("[grpclb %p] ", lb)) var err error if opt.CredsBundle != nil { lb.grpclbClientConnCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBalancer) if err != nil { - logger.Warningf("lbBalancer: client connection creds NewWithMode failed: %v", err) + lb.logger.Warningf("Failed to create credentials used for connecting to grpclb: %v", err) } lb.grpclbBackendCreds, err = opt.CredsBundle.NewWithMode(internal.CredsBundleModeBackendFromBalancer) if err != nil { - logger.Warningf("lbBalancer: backend creds NewWithMode failed: %v", err) + lb.logger.Warningf("Failed to create credentials used for connecting to backends returned by grpclb: %v", err) } } @@ -170,6 +179,7 @@ type lbBalancer struct { dialTarget string // user's dial target target string // same as dialTarget unless overridden in service config opt balancer.BuildOptions + logger *internalgrpclog.PrefixLogger usePickFirst bool @@ -188,7 +198,7 @@ type lbBalancer struct { // manualResolver is used in the remote LB ClientConn inside grpclb. When // resolved address updates are received by grpclb, filtered updates will be // send to remote LB ClientConn through this resolver. - manualResolver *lbManualResolver + manualResolver *manual.Resolver // The ClientConn to talk to the remote balancer. ccRemoteLB *remoteBalancerCCWrapper // backoff for calling remote balancer. @@ -236,12 +246,12 @@ type lbBalancer struct { // Caller must hold lb.mu. func (lb *lbBalancer) regeneratePicker(resetDrop bool) { if lb.state == connectivity.TransientFailure { - lb.picker = &errPicker{err: fmt.Errorf("all SubConns are in TransientFailure, last connection error: %v", lb.connErr)} + lb.picker = base.NewErrPicker(fmt.Errorf("all SubConns are in TransientFailure, last connection error: %v", lb.connErr)) return } if lb.state == connectivity.Connecting { - lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} + lb.picker = base.NewErrPicker(balancer.ErrNoSubConnAvailable) return } @@ -268,7 +278,7 @@ func (lb *lbBalancer) regeneratePicker(resetDrop bool) { // // This doesn't seem to be necessary after the connecting check above. // Kept for safety. - lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable} + lb.picker = base.NewErrPicker(balancer.ErrNoSubConnAvailable) return } if lb.inFallback { @@ -322,21 +332,21 @@ func (lb *lbBalancer) aggregateSubConnStates() connectivity.State { // UpdateSubConnState is unused; NewSubConn's options always specifies // updateSubConnState as the listener. 
func (lb *lbBalancer) UpdateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { - logger.Errorf("grpclb: UpdateSubConnState(%v, %+v) called unexpectedly", sc, scs) + lb.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, scs) } func (lb *lbBalancer) updateSubConnState(sc balancer.SubConn, scs balancer.SubConnState) { s := scs.ConnectivityState - if logger.V(2) { - logger.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s) + if lb.logger.V(2) { + lb.logger.Infof("SubConn state change: %p, %v", sc, s) } lb.mu.Lock() defer lb.mu.Unlock() oldS, ok := lb.scStates[sc] if !ok { - if logger.V(2) { - logger.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s) + if lb.logger.V(2) { + lb.logger.Infof("Received state change for an unknown SubConn: %p, %v", sc, s) } return } @@ -441,8 +451,8 @@ func (lb *lbBalancer) handleServiceConfig(gc *grpclbServiceConfig) { if lb.usePickFirst == newUsePickFirst { return } - if logger.V(2) { - logger.Infof("lbBalancer: switching mode, new usePickFirst: %+v", newUsePickFirst) + if lb.logger.V(2) { + lb.logger.Infof("Switching mode. Is pick_first used for backends? %v", newUsePickFirst) } lb.refreshSubConns(lb.backendAddrs, lb.inFallback, newUsePickFirst) } @@ -453,8 +463,8 @@ func (lb *lbBalancer) ResolverError(error) { } func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error { - if logger.V(2) { - logger.Infof("lbBalancer: UpdateClientConnState: %+v", ccs) + if lb.logger.V(2) { + lb.logger.Infof("UpdateClientConnState: %s", pretty.ToJSON(ccs)) } gc, _ := ccs.BalancerConfig.(*grpclbServiceConfig) lb.handleServiceConfig(gc) @@ -482,7 +492,9 @@ func (lb *lbBalancer) UpdateClientConnState(ccs balancer.ClientConnState) error } else if lb.ccRemoteLB == nil { // First time receiving resolved addresses, create a cc to remote // balancers. - lb.newRemoteBalancerCCWrapper() + if err := lb.newRemoteBalancerCCWrapper(); err != nil { + return err + } // Start the fallback goroutine. go lb.fallbackToBackendsAfter(lb.fallbackTimeout) } diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go index 39bc5cc71e819..20c5f2ec3967b 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_picker.go @@ -98,15 +98,6 @@ func (s *rpcStats) knownReceived() { atomic.AddInt64(&s.numCallsFinished, 1) } -type errPicker struct { - // Pick always returns this err. - err error -} - -func (p *errPicker) Pick(balancer.PickInfo) (balancer.PickResult, error) { - return balancer.PickResult{}, p.err -} - // rrPicker does roundrobin on subConns. It's typically used when there's no // response from remote balancer, and grpclb falls back to the resolved // backends. 
diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go index edb66a90a3b1b..c8fe1edd8e530 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_remote_balancer.go @@ -27,11 +27,8 @@ import ( "time" "github.com/golang/protobuf/proto" - timestamppb "github.com/golang/protobuf/ptypes/timestamp" - "github.com/google/go-cmp/cmp" "google.golang.org/grpc" "google.golang.org/grpc/balancer" - lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" "google.golang.org/grpc/connectivity" "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/internal/backoff" @@ -39,13 +36,28 @@ import ( "google.golang.org/grpc/keepalive" "google.golang.org/grpc/metadata" "google.golang.org/grpc/resolver" + + timestamppb "github.com/golang/protobuf/ptypes/timestamp" + lbpb "google.golang.org/grpc/balancer/grpclb/grpc_lb_v1" ) +func serverListEqual(a, b []*lbpb.Server) bool { + if len(a) != len(b) { + return false + } + for i := 0; i < len(a); i++ { + if !proto.Equal(a[i], b[i]) { + return false + } + } + return true +} + // processServerList updates balancer's internal state, create/remove SubConns // and regenerates picker using the received serverList. func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { - if logger.V(2) { - logger.Infof("lbBalancer: processing server list: %+v", l) + if lb.logger.V(2) { + lb.logger.Infof("Processing server list: %#v", l) } lb.mu.Lock() defer lb.mu.Unlock() @@ -55,9 +67,9 @@ func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { lb.serverListReceived = true // If the new server list == old server list, do nothing. - if cmp.Equal(lb.fullServerList, l.Servers, cmp.Comparer(proto.Equal)) { - if logger.V(2) { - logger.Infof("lbBalancer: new serverlist same as the previous one, ignoring") + if serverListEqual(lb.fullServerList, l.Servers) { + if lb.logger.V(2) { + lb.logger.Infof("Ignoring new server list as it is the same as the previous one") } return } @@ -78,9 +90,8 @@ func (lb *lbBalancer) processServerList(l *lbpb.ServerList) { ipStr = fmt.Sprintf("[%s]", ipStr) } addr := imetadata.Set(resolver.Address{Addr: fmt.Sprintf("%s:%d", ipStr, s.Port)}, md) - if logger.V(2) { - logger.Infof("lbBalancer: server list entry[%d]: ipStr:|%s|, port:|%d|, load balancer token:|%v|", - i, ipStr, s.Port, s.LoadBalanceToken) + if lb.logger.V(2) { + lb.logger.Infof("Server list entry:|%d|, ipStr:|%s|, port:|%d|, load balancer token:|%v|", i, ipStr, s.Port, s.LoadBalanceToken) } backendAddrs = append(backendAddrs, addr) } @@ -149,7 +160,7 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback // This bypasses the cc wrapper with SubConn cache. sc, err := lb.cc.ClientConn.NewSubConn(backendAddrs, opts) if err != nil { - logger.Warningf("grpclb: failed to create new SubConn: %v", err) + lb.logger.Warningf("Failed to create new SubConn: %v", err) return } sc.Connect() @@ -174,7 +185,7 @@ func (lb *lbBalancer) refreshSubConns(backendAddrs []resolver.Address, fallback opts.StateListener = func(scs balancer.SubConnState) { lb.updateSubConnState(sc, scs) } sc, err := lb.cc.NewSubConn([]resolver.Address{addr}, opts) if err != nil { - logger.Warningf("grpclb: failed to create new SubConn: %v", err) + lb.logger.Warningf("Failed to create new SubConn: %v", err) continue } lb.subConns[addrWithoutAttrs] = sc // Use the addr without MD as key for the map. 
@@ -217,7 +228,7 @@ type remoteBalancerCCWrapper struct { wg sync.WaitGroup } -func (lb *lbBalancer) newRemoteBalancerCCWrapper() { +func (lb *lbBalancer) newRemoteBalancerCCWrapper() error { var dopts []grpc.DialOption if creds := lb.opt.DialCreds; creds != nil { dopts = append(dopts, grpc.WithTransportCredentials(creds)) @@ -248,9 +259,10 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { // // The grpclb server addresses will set field ServerName, and creds will // receive ServerName as authority. - cc, err := grpc.DialContext(context.Background(), lb.manualResolver.Scheme()+":///grpclb.subClientConn", dopts...) + target := lb.manualResolver.Scheme() + ":///grpclb.subClientConn" + cc, err := grpc.Dial(target, dopts...) if err != nil { - logger.Fatalf("failed to dial: %v", err) + return fmt.Errorf("grpc.Dial(%s): %v", target, err) } ccw := &remoteBalancerCCWrapper{ cc: cc, @@ -261,6 +273,7 @@ func (lb *lbBalancer) newRemoteBalancerCCWrapper() { lb.ccRemoteLB = ccw ccw.wg.Add(1) go ccw.watchRemoteBalancer() + return nil } // close closed the ClientConn to remote balancer, and waits until all @@ -408,9 +421,9 @@ func (ccw *remoteBalancerCCWrapper) watchRemoteBalancer() { default: if err != nil { if err == errServerTerminatedConnection { - logger.Info(err) + ccw.lb.logger.Infof("Call to remote balancer failed: %v", err) } else { - logger.Warning(err) + ccw.lb.logger.Warningf("Call to remote balancer failed: %v", err) } } } diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go index 680779f1c82eb..c0f762c0c050e 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpclb_util.go @@ -27,67 +27,6 @@ import ( "google.golang.org/grpc/resolver" ) -// The parent ClientConn should re-resolve when grpclb loses connection to the -// remote balancer. When the ClientConn inside grpclb gets a TransientFailure, -// it calls lbManualResolver.ResolveNow(), which calls parent ClientConn's -// ResolveNow, and eventually results in re-resolve happening in parent -// ClientConn's resolver (DNS for example). -// -// parent -// ClientConn -// +-----------------------------------------------------------------+ -// | parent +---------------------------------+ | -// | DNS ClientConn | grpclb | | -// | resolver balancerWrapper | | | -// | + + | grpclb grpclb | | -// | | | | ManualResolver ClientConn | | -// | | | | + + | | -// | | | | | | Transient | | -// | | | | | | Failure | | -// | | | | | <--------- | | | -// | | | <--------------- | ResolveNow | | | -// | | <--------- | ResolveNow | | | | | -// | | ResolveNow | | | | | | -// | | | | | | | | -// | + + | + + | | -// | +---------------------------------+ | -// +-----------------------------------------------------------------+ - -// lbManualResolver is used by the ClientConn inside grpclb. It's a manual -// resolver with a special ResolveNow() function. -// -// When ResolveNow() is called, it calls ResolveNow() on the parent ClientConn, -// so when grpclb client lose contact with remote balancers, the parent -// ClientConn's resolver will re-resolve. 
-type lbManualResolver struct { - scheme string - ccr resolver.ClientConn - - ccb balancer.ClientConn -} - -func (r *lbManualResolver) Build(_ resolver.Target, cc resolver.ClientConn, _ resolver.BuildOptions) (resolver.Resolver, error) { - r.ccr = cc - return r, nil -} - -func (r *lbManualResolver) Scheme() string { - return r.scheme -} - -// ResolveNow calls resolveNow on the parent ClientConn. -func (r *lbManualResolver) ResolveNow(o resolver.ResolveNowOptions) { - r.ccb.ResolveNow(o) -} - -// Close is a noop for Resolver. -func (*lbManualResolver) Close() {} - -// UpdateState calls cc.UpdateState. -func (r *lbManualResolver) UpdateState(s resolver.State) { - r.ccr.UpdateState(s) -} - const subConnCacheTime = time.Second * 10 // lbCacheClientConn is a wrapper balancer.ClientConn with a SubConn cache. diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index ff7fea102288c..429c389e4730d 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -337,8 +337,8 @@ func (cc *ClientConn) exitIdleMode() error { return errConnClosing } if cc.idlenessState != ccIdlenessStateIdle { - cc.mu.Unlock() channelz.Infof(logger, cc.channelzID, "ClientConn asked to exit idle mode, current mode is %v", cc.idlenessState) + cc.mu.Unlock() return nil } @@ -404,13 +404,13 @@ func (cc *ClientConn) exitIdleMode() error { // name resolver, load balancer and any subchannels. func (cc *ClientConn) enterIdleMode() error { cc.mu.Lock() + defer cc.mu.Unlock() + if cc.conns == nil { - cc.mu.Unlock() return ErrClientConnClosing } if cc.idlenessState != ccIdlenessStateActive { - channelz.Errorf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) - cc.mu.Unlock() + channelz.Warningf(logger, cc.channelzID, "ClientConn asked to enter idle mode, current mode is %v", cc.idlenessState) return nil } @@ -431,14 +431,14 @@ func (cc *ClientConn) enterIdleMode() error { cc.balancerWrapper.enterIdleMode() cc.csMgr.updateState(connectivity.Idle) cc.idlenessState = ccIdlenessStateIdle - cc.mu.Unlock() + cc.addTraceEvent("entering idle mode") go func() { - cc.addTraceEvent("entering idle mode") for ac := range conns { ac.tearDown(errConnIdling) } }() + return nil } @@ -804,6 +804,12 @@ func init() { internal.SubscribeToConnectivityStateChanges = func(cc *ClientConn, s grpcsync.Subscriber) func() { return cc.csMgr.pubSub.Subscribe(s) } + internal.EnterIdleModeForTesting = func(cc *ClientConn) error { + return cc.enterIdleMode() + } + internal.ExitIdleModeForTesting = func(cc *ClientConn) error { + return cc.exitIdleMode() + } } func (cc *ClientConn) maybeApplyDefaultServiceConfig(addrs []resolver.Address) { diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 1fd0d5c127f4f..cfc9fd85e8dd9 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -644,6 +644,7 @@ func defaultDialOptions() dialOptions { UseProxy: true, }, recvBufferPool: nopBufferPool{}, + idleTimeout: 30 * time.Minute, } } @@ -680,8 +681,8 @@ func WithResolvers(rs ...resolver.Builder) DialOption { // channel will exit idle mode when the Connect() method is called or when an // RPC is initiated. // -// By default this feature is disabled, which can also be explicitly configured -// by passing zero to this function. 
+// A default timeout of 30 minutes will be used if this dial option is not set +// at dial time and idleness can be disabled by passing a timeout of zero. // // # Experimental // diff --git a/vendor/google.golang.org/grpc/encoding/encoding.go b/vendor/google.golang.org/grpc/encoding/encoding.go index 69d5580b6adfd..5ebf88d7147f2 100644 --- a/vendor/google.golang.org/grpc/encoding/encoding.go +++ b/vendor/google.golang.org/grpc/encoding/encoding.go @@ -38,6 +38,10 @@ const Identity = "identity" // Compressor is used for compressing and decompressing when sending or // receiving messages. +// +// If a Compressor implements `DecompressedSize(compressedBytes []byte) int`, +// gRPC will invoke it to determine the size of the buffer allocated for the +// result of decompression. A return value of -1 indicates unknown size. type Compressor interface { // Compress writes the data written to wc to w after compressing it. If an // error occurs while initializing the compressor, that error is returned @@ -51,15 +55,6 @@ type Compressor interface { // coding header. The result must be static; the result cannot change // between calls. Name() string - // If a Compressor implements - // DecompressedSize(compressedBytes []byte) int, gRPC will call it - // to determine the size of the buffer allocated for the result of decompression. - // Return -1 to indicate unknown size. - // - // Experimental - // - // Notice: This API is EXPERIMENTAL and may be changed or removed in a - // later release. } var registeredCompressor = make(map[string]Compressor) diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index a01a1b4d54bd5..4439cda0f3cb7 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -44,8 +44,15 @@ const ( // // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. type HealthClient interface { - // If the requested service is unknown, the call will fail with status - // NOT_FOUND. + // Check gets the health of the specified service. If the requested service + // is unknown, the call will fail with status NOT_FOUND. If the caller does + // not specify a service name, the server should respond with its overall + // health status. + // + // Clients should set a deadline when calling Check, and can declare the + // server unhealthy if they do not receive a timely response. + // + // Check implementations should be idempotent and side effect free. Check(ctx context.Context, in *HealthCheckRequest, opts ...grpc.CallOption) (*HealthCheckResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current @@ -118,8 +125,15 @@ func (x *healthWatchClient) Recv() (*HealthCheckResponse, error) { // All implementations should embed UnimplementedHealthServer // for forward compatibility type HealthServer interface { - // If the requested service is unknown, the call will fail with status - // NOT_FOUND. + // Check gets the health of the specified service. If the requested service + // is unknown, the call will fail with status NOT_FOUND. If the caller does + // not specify a service name, the server should respond with its overall + // health status. 
+ // + // Clients should set a deadline when calling Check, and can declare the + // server unhealthy if they do not receive a timely response. + // + // Check implementations should be idempotent and side effect free. Check(context.Context, *HealthCheckRequest) (*HealthCheckResponse, error) // Performs a watch for the serving status of the requested service. // The server will immediately send back a message indicating the current diff --git a/vendor/google.golang.org/grpc/internal/backoff/backoff.go b/vendor/google.golang.org/grpc/internal/backoff/backoff.go index 5fc0ee3da53bc..fed1c011a3259 100644 --- a/vendor/google.golang.org/grpc/internal/backoff/backoff.go +++ b/vendor/google.golang.org/grpc/internal/backoff/backoff.go @@ -23,6 +23,8 @@ package backoff import ( + "context" + "errors" "time" grpcbackoff "google.golang.org/grpc/backoff" @@ -71,3 +73,37 @@ func (bc Exponential) Backoff(retries int) time.Duration { } return time.Duration(backoff) } + +// ErrResetBackoff is the error to be returned by the function executed by RunF, +// to instruct the latter to reset its backoff state. +var ErrResetBackoff = errors.New("reset backoff state") + +// RunF provides a convenient way to run a function f repeatedly until the +// context expires or f returns a non-nil error that is not ErrResetBackoff. +// When f returns ErrResetBackoff, RunF continues to run f, but resets its +// backoff state before doing so. backoff accepts an integer representing the +// number of retries, and returns the amount of time to backoff. +func RunF(ctx context.Context, f func() error, backoff func(int) time.Duration) { + attempt := 0 + timer := time.NewTimer(0) + for ctx.Err() == nil { + select { + case <-timer.C: + case <-ctx.Done(): + timer.Stop() + return + } + + err := f() + if errors.Is(err, ErrResetBackoff) { + timer.Reset(0) + attempt = 0 + continue + } + if err != nil { + return + } + timer.Reset(backoff(attempt)) + attempt++ + } +} diff --git a/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go index 8177fb58da9aa..4cee66aeb6e69 100644 --- a/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go +++ b/vendor/google.golang.org/grpc/internal/balancergroup/balancergroup.go @@ -328,6 +328,11 @@ func (bg *BalancerGroup) AddWithClientConn(id, balancerName string, cc balancer. // caching is disabled. 
if bg.outgoingStarted && bg.deletedBalancerCache != nil { if old, ok := bg.deletedBalancerCache.Remove(id); ok { + if bg.logger.V(2) { + bg.logger.Infof("Removing and reusing child policy of type %q for locality %q from the balancer cache", balancerName, id) + bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) + } + sbc, _ = old.(*subBalancerWrapper) if sbc != nil && sbc.builder != builder { // If the sub-balancer in cache was built with a different @@ -403,7 +408,7 @@ func (bg *BalancerGroup) Remove(id string) { sbToRemove, ok := bg.idToBalancerConfig[id] if !ok { - bg.logger.Infof("balancer group: trying to remove a non-existing locality from balancer group: %v", id) + bg.logger.Errorf("Child policy for locality %q does not exist in the balancer group", id) bg.outgoingMu.Unlock() return } @@ -418,7 +423,17 @@ func (bg *BalancerGroup) Remove(id string) { } if bg.deletedBalancerCache != nil { + if bg.logger.V(2) { + bg.logger.Infof("Adding child policy for locality %q to the balancer cache", id) + bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) + } + bg.deletedBalancerCache.Add(id, sbToRemove, func() { + if bg.logger.V(2) { + bg.logger.Infof("Removing child policy for locality %q from the balancer cache after timeout", id) + bg.logger.Infof("Number of items remaining in the balancer cache: %d", bg.deletedBalancerCache.Len()) + } + // A sub-balancer evicted from the timeout cache needs to closed // and its subConns need to removed, unconditionally. There is a // possibility that a sub-balancer might be removed (thereby diff --git a/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go b/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go index 3f2d47302c4e1..2fa48701023df 100644 --- a/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go +++ b/vendor/google.golang.org/grpc/internal/cache/timeoutCache.go @@ -142,3 +142,10 @@ func (c *TimeoutCache) Clear(runCallback bool) { entry.callback() } } + +// Len returns the number of entries in the cache. +func (c *TimeoutCache) Len() int { + c.mu.Lock() + defer c.mu.Unlock() + return len(c.cache) +} diff --git a/vendor/google.golang.org/grpc/internal/internal.go b/vendor/google.golang.org/grpc/internal/internal.go index c8a8c76d628ca..0d94c63e06e2f 100644 --- a/vendor/google.golang.org/grpc/internal/internal.go +++ b/vendor/google.golang.org/grpc/internal/internal.go @@ -175,6 +175,12 @@ var ( // GRPCResolverSchemeExtraMetadata determines when gRPC will add extra // metadata to RPCs. GRPCResolverSchemeExtraMetadata string = "xds" + + // EnterIdleModeForTesting gets the ClientConn to enter IDLE mode. + EnterIdleModeForTesting any // func(*grpc.ClientConn) error + + // ExitIdleModeForTesting gets the ClientConn to exit IDLE mode. + ExitIdleModeForTesting any // func(*grpc.ClientConn) error ) // HealthChecker defines the signature of the client-side LB channel health checking function. 
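// Illustrative sketch of the new backoff.RunF helper introduced in
// internal/backoff above (and used by the orca producer further down in this
// diff). The names pollUntilDone and poll are hypothetical, and
// internal/backoff is only importable from inside the grpc-go module itself,
// so this is a usage pattern, not code that belongs to this patch.
package example

import (
	"context"

	grpcbackoff "google.golang.org/grpc/backoff"
	"google.golang.org/grpc/internal/backoff"
)

func pollUntilDone(ctx context.Context, poll func() error) {
	// Exponential delays between attempts. RunF retries after a delay when
	// poll returns nil, retries immediately with a reset attempt counter when
	// it returns backoff.ErrResetBackoff, and stops on any other error or
	// when ctx is cancelled.
	strategy := backoff.Exponential{Config: grpcbackoff.DefaultConfig}
	backoff.RunF(ctx, poll, strategy.Backoff)
}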
diff --git a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go index 2f0417bd8db66..00f524a4809eb 100644 --- a/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go +++ b/vendor/google.golang.org/grpc/internal/proto/grpc_lookup_v1/rls.pb.go @@ -23,6 +23,7 @@ package grpc_lookup_v1 import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" reflect "reflect" sync "sync" ) @@ -98,6 +99,8 @@ type RouteLookupRequest struct { StaleHeaderData string `protobuf:"bytes,6,opt,name=stale_header_data,json=staleHeaderData,proto3" json:"stale_header_data,omitempty"` // Map of key values extracted via key builders for the gRPC or HTTP request. KeyMap map[string]string `protobuf:"bytes,4,rep,name=key_map,json=keyMap,proto3" json:"key_map,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + // Application-specific optional extensions. + Extensions []*anypb.Any `protobuf:"bytes,7,rep,name=extensions,proto3" json:"extensions,omitempty"` } func (x *RouteLookupRequest) Reset() { @@ -160,6 +163,13 @@ func (x *RouteLookupRequest) GetKeyMap() map[string]string { return nil } +func (x *RouteLookupRequest) GetExtensions() []*anypb.Any { + if x != nil { + return x.Extensions + } + return nil +} + type RouteLookupResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache @@ -173,6 +183,8 @@ type RouteLookupResponse struct { // Cached with "target" and sent with all requests that match the request key. // Allows the RLS to pass its work product to the eventual target. HeaderData string `protobuf:"bytes,2,opt,name=header_data,json=headerData,proto3" json:"header_data,omitempty"` + // Application-specific optional extensions. 
+ Extensions []*anypb.Any `protobuf:"bytes,4,rep,name=extensions,proto3" json:"extensions,omitempty"` } func (x *RouteLookupResponse) Reset() { @@ -221,55 +233,70 @@ func (x *RouteLookupResponse) GetHeaderData() string { return "" } +func (x *RouteLookupResponse) GetExtensions() []*anypb.Any { + if x != nil { + return x.Extensions + } + return nil +} + var File_grpc_lookup_v1_rls_proto protoreflect.FileDescriptor var file_grpc_lookup_v1_rls_proto_rawDesc = []byte{ 0x0a, 0x18, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x6c, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x22, 0x83, 0x03, 0x0a, 0x12, 0x52, + 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xb9, 0x03, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, + 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, + 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x41, 0x0a, + 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x29, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x73, 0x74, 0x61, + 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, 0x74, 0x61, 0x12, 0x47, 0x0a, 0x07, + 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2e, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, + 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x2e, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x52, 0x06, 0x6b, + 0x65, 0x79, 0x4d, 0x61, 0x70, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x07, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x1a, 0x39, 0x0a, 0x0b, 0x4b, + 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, + 0x75, 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3f, 0x0a, 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, + 0x12, 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, + 0x57, 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x4d, + 0x49, 0x53, 0x53, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, + 0x53, 0x54, 0x41, 0x4c, 0x45, 0x10, 0x02, 0x4a, 
0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, + 0x02, 0x10, 0x03, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x04, 0x70, 0x61, 0x74, + 0x68, 0x22, 0x94, 0x01, 0x0a, 0x13, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, + 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, + 0x67, 0x65, 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, + 0x65, 0x74, 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, + 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, + 0x44, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, + 0x6e, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, 0x0a, + 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, + 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x32, 0x6e, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, + 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, + 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, 0x22, 0x2e, + 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, 0x54, 0x79, - 0x70, 0x65, 0x12, 0x41, 0x0a, 0x06, 0x72, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, - 0x28, 0x0e, 0x32, 0x29, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, - 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x52, 0x06, 0x72, - 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, 0x2a, 0x0a, 0x11, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x5f, 0x68, - 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0f, 0x73, 0x74, 0x61, 0x6c, 0x65, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, 0x74, - 0x61, 0x12, 0x47, 0x0a, 0x07, 0x6b, 0x65, 0x79, 0x5f, 0x6d, 0x61, 0x70, 0x18, 0x04, 0x20, 0x03, - 0x28, 0x0b, 0x32, 0x2e, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, - 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, - 0x72, 0x79, 0x52, 0x06, 0x6b, 0x65, 0x79, 0x4d, 0x61, 0x70, 0x1a, 0x39, 0x0a, 0x0b, 0x4b, 0x65, - 0x79, 0x4d, 0x61, 0x70, 0x45, 0x6e, 0x74, 0x72, 0x79, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, 0x79, - 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x14, 0x0a, 0x05, 0x76, - 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, - 0x65, 0x3a, 0x02, 0x38, 0x01, 0x22, 0x3f, 0x0a, 0x06, 0x52, 0x65, 0x61, 0x73, 0x6f, 0x6e, 0x12, - 0x12, 0x0a, 0x0e, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x55, 0x4e, 0x4b, 0x4e, 0x4f, 0x57, - 0x4e, 0x10, 0x00, 0x12, 0x0f, 0x0a, 0x0b, 0x52, 0x45, 0x41, 0x53, 0x4f, 0x4e, 0x5f, 0x4d, 0x49, - 0x53, 0x53, 0x10, 0x01, 0x12, 0x10, 0x0a, 0x0c, 0x52, 0x45, 0x41, 0x53, 
0x4f, 0x4e, 0x5f, 0x53, - 0x54, 0x41, 0x4c, 0x45, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x4a, 0x04, 0x08, 0x02, - 0x10, 0x03, 0x52, 0x06, 0x73, 0x65, 0x72, 0x76, 0x65, 0x72, 0x52, 0x04, 0x70, 0x61, 0x74, 0x68, - 0x22, 0x5e, 0x0a, 0x13, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, - 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, - 0x74, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x07, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x73, 0x12, 0x1f, 0x0a, 0x0b, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x5f, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x68, 0x65, 0x61, 0x64, 0x65, 0x72, 0x44, 0x61, - 0x74, 0x61, 0x4a, 0x04, 0x08, 0x01, 0x10, 0x02, 0x52, 0x06, 0x74, 0x61, 0x72, 0x67, 0x65, 0x74, - 0x32, 0x6e, 0x0a, 0x12, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x53, - 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x58, 0x0a, 0x0b, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, - 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, 0x22, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, - 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, - 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, - 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, - 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, - 0x42, 0x4d, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, - 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x08, 0x52, 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, - 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, - 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, - 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x74, 0x1a, 0x23, 0x2e, 0x67, 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, + 0x76, 0x31, 0x2e, 0x52, 0x6f, 0x75, 0x74, 0x65, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x42, 0x4d, 0x0a, 0x11, 0x69, 0x6f, 0x2e, 0x67, + 0x72, 0x70, 0x63, 0x2e, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2e, 0x76, 0x31, 0x42, 0x08, 0x52, + 0x6c, 0x73, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x67, 0x72, 0x70, + 0x63, 0x2f, 0x6c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x2f, 0x67, 0x72, 0x70, 0x63, 0x5f, 0x6c, 0x6f, + 0x6f, 0x6b, 0x75, 0x70, 0x5f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -291,17 +318,20 @@ var file_grpc_lookup_v1_rls_proto_goTypes = []interface{}{ (*RouteLookupRequest)(nil), // 1: grpc.lookup.v1.RouteLookupRequest (*RouteLookupResponse)(nil), // 2: grpc.lookup.v1.RouteLookupResponse nil, // 3: grpc.lookup.v1.RouteLookupRequest.KeyMapEntry + (*anypb.Any)(nil), // 4: google.protobuf.Any } var file_grpc_lookup_v1_rls_proto_depIdxs = []int32{ 0, // 0: grpc.lookup.v1.RouteLookupRequest.reason:type_name -> grpc.lookup.v1.RouteLookupRequest.Reason 3, // 1: grpc.lookup.v1.RouteLookupRequest.key_map:type_name -> grpc.lookup.v1.RouteLookupRequest.KeyMapEntry - 1, // 2: grpc.lookup.v1.RouteLookupService.RouteLookup:input_type -> grpc.lookup.v1.RouteLookupRequest - 2, // 3: 
grpc.lookup.v1.RouteLookupService.RouteLookup:output_type -> grpc.lookup.v1.RouteLookupResponse - 3, // [3:4] is the sub-list for method output_type - 2, // [2:3] is the sub-list for method input_type - 2, // [2:2] is the sub-list for extension type_name - 2, // [2:2] is the sub-list for extension extendee - 0, // [0:2] is the sub-list for field type_name + 4, // 2: grpc.lookup.v1.RouteLookupRequest.extensions:type_name -> google.protobuf.Any + 4, // 3: grpc.lookup.v1.RouteLookupResponse.extensions:type_name -> google.protobuf.Any + 1, // 4: grpc.lookup.v1.RouteLookupService.RouteLookup:input_type -> grpc.lookup.v1.RouteLookupRequest + 2, // 5: grpc.lookup.v1.RouteLookupService.RouteLookup:output_type -> grpc.lookup.v1.RouteLookupResponse + 5, // [5:6] is the sub-list for method output_type + 4, // [4:5] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name } func init() { file_grpc_lookup_v1_rls_proto_init() } diff --git a/vendor/google.golang.org/grpc/internal/status/status.go b/vendor/google.golang.org/grpc/internal/status/status.go index 4cf85cad9f810..03ef2fedd5cb5 100644 --- a/vendor/google.golang.org/grpc/internal/status/status.go +++ b/vendor/google.golang.org/grpc/internal/status/status.go @@ -43,6 +43,34 @@ type Status struct { s *spb.Status } +// NewWithProto returns a new status including details from statusProto. This +// is meant to be used by the gRPC library only. +func NewWithProto(code codes.Code, message string, statusProto []string) *Status { + if len(statusProto) != 1 { + // No grpc-status-details bin header, or multiple; just ignore. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + st := &spb.Status{} + if err := proto.Unmarshal([]byte(statusProto[0]), st); err != nil { + // Probably not a google.rpc.Status proto; do not provide details. + return &Status{s: &spb.Status{Code: int32(code), Message: message}} + } + if st.Code == int32(code) { + // The codes match between the grpc-status header and the + // grpc-status-details-bin header; use the full details proto. + return &Status{s: st} + } + return &Status{ + s: &spb.Status{ + Code: int32(codes.Internal), + Message: fmt.Sprintf( + "grpc-status-details-bin mismatch: grpc-status=%v, grpc-message=%q, grpc-status-details-bin=%+v", + code, message, st, + ), + }, + } +} + // New returns a Status representing c and msg. func New(c codes.Code, msg string) *Status { return &Status{s: &spb.Status{Code: int32(c), Message: msg}} diff --git a/vendor/google.golang.org/grpc/internal/transport/handler_server.go b/vendor/google.golang.org/grpc/internal/transport/handler_server.go index 98f80e3fa00aa..17f7a21b5a9f0 100644 --- a/vendor/google.golang.org/grpc/internal/transport/handler_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/handler_server.go @@ -220,18 +220,20 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro h.Set("Grpc-Message", encodeGrpcMessage(m)) } + s.hdrMu.Lock() if p := st.Proto(); p != nil && len(p.Details) > 0 { + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. 
panic(err) } - h.Set("Grpc-Status-Details-Bin", encodeBinHeader(stBytes)) + h.Set(grpcStatusDetailsBinHeader, encodeBinHeader(stBytes)) } - if md := s.Trailer(); len(md) > 0 { - for k, vv := range md { + if len(s.trailer) > 0 { + for k, vv := range s.trailer { // Clients don't tolerate reading restricted headers after some non restricted ones were sent. if isReservedHeader(k) { continue @@ -243,6 +245,7 @@ func (ht *serverHandlerTransport) WriteStatus(s *Stream, st *status.Status) erro } } } + s.hdrMu.Unlock() }) if err == nil { // transport has not been closed @@ -287,7 +290,7 @@ func (ht *serverHandlerTransport) writeCommonHeaders(s *Stream) { } // writeCustomHeaders sets custom headers set on the stream via SetHeader -// on the first write call (Write, WriteHeader, or WriteStatus). +// on the first write call (Write, WriteHeader, or WriteStatus) func (ht *serverHandlerTransport) writeCustomHeaders(s *Stream) { h := ht.rw.Header() @@ -344,7 +347,7 @@ func (ht *serverHandlerTransport) WriteHeader(s *Stream, md metadata.MD) error { return err } -func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream), traceCtx func(context.Context, string) context.Context) { +func (ht *serverHandlerTransport) HandleStreams(startStream func(*Stream)) { // With this transport type there will be exactly 1 stream: this HTTP request. ctx := ht.req.Context() diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_client.go b/vendor/google.golang.org/grpc/internal/transport/http2_client.go index badab8acf3b11..d6f5c49358b58 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_client.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_client.go @@ -1399,7 +1399,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { mdata = make(map[string][]string) contentTypeErr = "malformed header: missing HTTP content-type" grpcMessage string - statusGen *status.Status recvCompress string httpStatusCode *int httpStatusErr string @@ -1434,12 +1433,6 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { rawStatusCode = codes.Code(uint32(code)) case "grpc-message": grpcMessage = decodeGrpcMessage(hf.Value) - case "grpc-status-details-bin": - var err error - statusGen, err = decodeGRPCStatusDetails(hf.Value) - if err != nil { - headerError = fmt.Sprintf("transport: malformed grpc-status-details-bin: %v", err) - } case ":status": if hf.Value == "200" { httpStatusErr = "" @@ -1548,14 +1541,12 @@ func (t *http2Client) operateHeaders(frame *http2.MetaHeadersFrame) { return } - if statusGen == nil { - statusGen = status.New(rawStatusCode, grpcMessage) - } + status := istatus.NewWithProto(rawStatusCode, grpcMessage, mdata[grpcStatusDetailsBinHeader]) // If client received END_STREAM from server while stream was still active, // send RST_STREAM. 
rstStream := s.getState() == streamActive - t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, statusGen, mdata, true) + t.closeStream(s, io.EOF, rstStream, http2.ErrCodeNo, status, mdata, true) } // readServerPreface reads and handles the initial settings frame from the diff --git a/vendor/google.golang.org/grpc/internal/transport/http2_server.go b/vendor/google.golang.org/grpc/internal/transport/http2_server.go index c06db679d89cc..6fa1eb41992a0 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http2_server.go +++ b/vendor/google.golang.org/grpc/internal/transport/http2_server.go @@ -342,7 +342,7 @@ func NewServerTransport(conn net.Conn, config *ServerConfig) (_ ServerTransport, // operateHeaders takes action on the decoded headers. Returns an error if fatal // error encountered and transport needs to close, otherwise returns nil. -func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream), traceCtx func(context.Context, string) context.Context) error { +func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func(*Stream)) error { // Acquire max stream ID lock for entire duration t.maxStreamMu.Lock() defer t.maxStreamMu.Unlock() @@ -561,7 +561,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( } if t.inTapHandle != nil { var err error - if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method}); err != nil { + if s.ctx, err = t.inTapHandle(s.ctx, &tap.Info{FullMethodName: s.method, Header: mdata}); err != nil { t.mu.Unlock() if t.logger.V(logLevel) { t.logger.Infof("Aborting the stream early due to InTapHandle failure: %v", err) @@ -592,7 +592,6 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( s.requestRead = func(n int) { t.adjustWindow(s, uint32(n)) } - s.ctx = traceCtx(s.ctx, s.method) for _, sh := range t.stats { s.ctx = sh.TagRPC(s.ctx, &stats.RPCTagInfo{FullMethodName: s.method}) inHeader := &stats.InHeader{ @@ -630,7 +629,7 @@ func (t *http2Server) operateHeaders(frame *http2.MetaHeadersFrame, handle func( // HandleStreams receives incoming streams using the given handler. This is // typically run in a separate goroutine. // traceCtx attaches trace to ctx and returns the new context. -func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context.Context, string) context.Context) { +func (t *http2Server) HandleStreams(handle func(*Stream)) { defer close(t.readerDone) for { t.controlBuf.throttle() @@ -665,7 +664,7 @@ func (t *http2Server) HandleStreams(handle func(*Stream), traceCtx func(context. } switch frame := frame.(type) { case *http2.MetaHeadersFrame: - if err := t.operateHeaders(frame, handle, traceCtx); err != nil { + if err := t.operateHeaders(frame, handle); err != nil { t.Close(err) break } @@ -1053,12 +1052,15 @@ func (t *http2Server) WriteStatus(s *Stream, st *status.Status) error { headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-message", Value: encodeGrpcMessage(st.Message())}) if p := st.Proto(); p != nil && len(p.Details) > 0 { + // Do not use the user's grpc-status-details-bin (if present) if we are + // even attempting to set our own. + delete(s.trailer, grpcStatusDetailsBinHeader) stBytes, err := proto.Marshal(p) if err != nil { // TODO: return error instead, when callers are able to handle it. 
t.logger.Errorf("Failed to marshal rpc status: %s, error: %v", pretty.ToJSON(p), err) } else { - headerFields = append(headerFields, hpack.HeaderField{Name: "grpc-status-details-bin", Value: encodeBinHeader(stBytes)}) + headerFields = append(headerFields, hpack.HeaderField{Name: grpcStatusDetailsBinHeader, Value: encodeBinHeader(stBytes)}) } } diff --git a/vendor/google.golang.org/grpc/internal/transport/http_util.go b/vendor/google.golang.org/grpc/internal/transport/http_util.go index 1958140082b35..dc29d590e91fb 100644 --- a/vendor/google.golang.org/grpc/internal/transport/http_util.go +++ b/vendor/google.golang.org/grpc/internal/transport/http_util.go @@ -34,12 +34,9 @@ import ( "time" "unicode/utf8" - "github.com/golang/protobuf/proto" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" - spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) const ( @@ -88,6 +85,8 @@ var ( } ) +var grpcStatusDetailsBinHeader = "grpc-status-details-bin" + // isReservedHeader checks whether hdr belongs to HTTP2 headers // reserved by gRPC protocol. Any other headers are classified as the // user-specified metadata. @@ -103,7 +102,6 @@ func isReservedHeader(hdr string) bool { "grpc-message", "grpc-status", "grpc-timeout", - "grpc-status-details-bin", // Intentionally exclude grpc-previous-rpc-attempts and // grpc-retry-pushback-ms, which are "reserved", but their API // intentionally works via metadata. @@ -154,18 +152,6 @@ func decodeMetadataHeader(k, v string) (string, error) { return v, nil } -func decodeGRPCStatusDetails(rawDetails string) (*status.Status, error) { - v, err := decodeBinHeader(rawDetails) - if err != nil { - return nil, err - } - st := &spb.Status{} - if err = proto.Unmarshal(v, st); err != nil { - return nil, err - } - return status.FromProto(st), nil -} - type timeoutUnit uint8 const ( diff --git a/vendor/google.golang.org/grpc/internal/transport/transport.go b/vendor/google.golang.org/grpc/internal/transport/transport.go index 74a811fc0590b..aac056e723bb5 100644 --- a/vendor/google.golang.org/grpc/internal/transport/transport.go +++ b/vendor/google.golang.org/grpc/internal/transport/transport.go @@ -698,7 +698,7 @@ type ClientTransport interface { // Write methods for a given Stream will be called serially. type ServerTransport interface { // HandleStreams receives incoming streams using the given handler. - HandleStreams(func(*Stream), func(context.Context, string) context.Context) + HandleStreams(func(*Stream)) // WriteHeader sends the header metadata for the given stream. // WriteHeader may not be called on all streams. 
diff --git a/vendor/google.golang.org/grpc/orca/producer.go b/vendor/google.golang.org/grpc/orca/producer.go index 2d58725547fc0..04edae6de66f1 100644 --- a/vendor/google.golang.org/grpc/orca/producer.go +++ b/vendor/google.golang.org/grpc/orca/producer.go @@ -24,6 +24,7 @@ import ( "google.golang.org/grpc" "google.golang.org/grpc/balancer" "google.golang.org/grpc/codes" + "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/orca/internal" "google.golang.org/grpc/status" @@ -169,48 +170,29 @@ func (p *producer) updateRunLocked() { func (p *producer) run(ctx context.Context, done chan struct{}, interval time.Duration) { defer close(done) - backoffAttempt := 0 - backoffTimer := time.NewTimer(0) - for ctx.Err() == nil { - select { - case <-backoffTimer.C: - case <-ctx.Done(): - return - } - + runStream := func() error { resetBackoff, err := p.runStream(ctx, interval) - - if resetBackoff { - backoffTimer.Reset(0) - backoffAttempt = 0 - } else { - backoffTimer.Reset(p.backoff(backoffAttempt)) - backoffAttempt++ - } - - switch { - case err == nil: - // No error was encountered; restart the stream. - case ctx.Err() != nil: - // Producer was stopped; exit immediately and without logging an - // error. - return - case status.Code(err) == codes.Unimplemented: + if status.Code(err) == codes.Unimplemented { // Unimplemented; do not retry. logger.Error("Server doesn't support ORCA OOB load reporting protocol; not listening for load reports.") - return - case status.Code(err) == codes.Unavailable, status.Code(err) == codes.Canceled: - // TODO: these codes should ideally log an error, too, but for now - // we receive them when shutting down the ClientConn (Unavailable - // if the stream hasn't started yet, and Canceled if it happens - // mid-stream). Once we can determine the state or ensure the - // producer is stopped before the stream ends, we can log an error - // when it's not a natural shutdown. - default: - // Log all other errors. + return err + } + // Retry for all other errors. + if code := status.Code(err); code != codes.Unavailable && code != codes.Canceled { + // TODO: Unavailable and Canceled should also ideally log an error, + // but for now we receive them when shutting down the ClientConn + // (Unavailable if the stream hasn't started yet, and Canceled if it + // happens mid-stream). Once we can determine the state or ensure + // the producer is stopped before the stream ends, we can log an + // error when it's not a natural shutdown. logger.Error("Received unexpected stream error:", err) } + if resetBackoff { + return backoff.ErrResetBackoff + } + return nil } + backoff.RunF(ctx, runStream, p.backoff) } // runStream runs a single stream on the subchannel and returns the resulting diff --git a/vendor/google.golang.org/grpc/resolver/manual/manual.go b/vendor/google.golang.org/grpc/resolver/manual/manual.go index e6b0f14cd941f..0a4262342f358 100644 --- a/vendor/google.golang.org/grpc/resolver/manual/manual.go +++ b/vendor/google.golang.org/grpc/resolver/manual/manual.go @@ -26,7 +26,9 @@ import ( "google.golang.org/grpc/resolver" ) -// NewBuilderWithScheme creates a new test resolver builder with the given scheme. +// NewBuilderWithScheme creates a new manual resolver builder with the given +// scheme. Every instance of the manual resolver may only ever be used with a +// single grpc.ClientConn. Otherwise, bad things will happen. 
func NewBuilderWithScheme(scheme string) *Resolver { return &Resolver{ BuildCallback: func(resolver.Target, resolver.ClientConn, resolver.BuildOptions) {}, @@ -58,30 +60,34 @@ type Resolver struct { scheme string // Fields actually belong to the resolver. - mu sync.Mutex // Guards access to CC. - CC resolver.ClientConn - bootstrapState *resolver.State + // Guards access to below fields. + mu sync.Mutex + CC resolver.ClientConn + // Storing the most recent state update makes this resolver resilient to + // restarts, which is possible with channel idleness. + lastSeenState *resolver.State } // InitialState adds initial state to the resolver so that UpdateState doesn't // need to be explicitly called after Dial. func (r *Resolver) InitialState(s resolver.State) { - r.bootstrapState = &s + r.lastSeenState = &s } // Build returns itself for Resolver, because it's both a builder and a resolver. func (r *Resolver) Build(target resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { + r.BuildCallback(target, cc, opts) r.mu.Lock() r.CC = cc - r.mu.Unlock() - r.BuildCallback(target, cc, opts) - if r.bootstrapState != nil { - r.UpdateState(*r.bootstrapState) + if r.lastSeenState != nil { + err := r.CC.UpdateState(*r.lastSeenState) + go r.UpdateStateCallback(err) } + r.mu.Unlock() return r, nil } -// Scheme returns the test scheme. +// Scheme returns the manual resolver's scheme. func (r *Resolver) Scheme() string { return r.scheme } @@ -100,6 +106,7 @@ func (r *Resolver) Close() { func (r *Resolver) UpdateState(s resolver.State) { r.mu.Lock() err := r.CC.UpdateState(s) + r.lastSeenState = &s r.mu.Unlock() r.UpdateStateCallback(err) } diff --git a/vendor/google.golang.org/grpc/server.go b/vendor/google.golang.org/grpc/server.go index eeae92fbe0204..8f60d421437d9 100644 --- a/vendor/google.golang.org/grpc/server.go +++ b/vendor/google.golang.org/grpc/server.go @@ -983,7 +983,7 @@ func (s *Server) serveStreams(st transport.ServerTransport) { f := func() { defer streamQuota.release() defer wg.Done() - s.handleStream(st, stream, s.traceInfo(st, stream)) + s.handleStream(st, stream) } if s.opts.numServerWorkers > 0 { @@ -995,12 +995,6 @@ func (s *Server) serveStreams(st transport.ServerTransport) { } } go f() - }, func(ctx context.Context, method string) context.Context { - if !EnableTracing { - return ctx - } - tr := trace.New("grpc.Recv."+methodFamily(method), method) - return trace.NewContext(ctx, tr) }) wg.Wait() } @@ -1049,30 +1043,6 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, r *http.Request) { s.serveStreams(st) } -// traceInfo returns a traceInfo and associates it with stream, if tracing is enabled. -// If tracing is not enabled, it returns nil. 
-func (s *Server) traceInfo(st transport.ServerTransport, stream *transport.Stream) (trInfo *traceInfo) { - if !EnableTracing { - return nil - } - tr, ok := trace.FromContext(stream.Context()) - if !ok { - return nil - } - - trInfo = &traceInfo{ - tr: tr, - firstLine: firstLine{ - client: false, - remoteAddr: st.RemoteAddr(), - }, - } - if dl, ok := stream.Context().Deadline(); ok { - trInfo.firstLine.deadline = time.Until(dl) - } - return trInfo -} - func (s *Server) addConn(addr string, st transport.ServerTransport) bool { s.mu.Lock() defer s.mu.Unlock() @@ -1133,7 +1103,7 @@ func (s *Server) incrCallsFailed() { atomic.AddInt64(&s.czData.callsFailed, 1) } -func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { +func (s *Server) sendResponse(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, msg any, cp Compressor, opts *transport.Options, comp encoding.Compressor) error { data, err := encode(s.getCodec(stream.ContentSubtype()), msg) if err != nil { channelz.Error(logger, s.channelzID, "grpc: server failed to encode response: ", err) @@ -1152,7 +1122,7 @@ func (s *Server) sendResponse(t transport.ServerTransport, stream *transport.Str err = t.Write(stream, hdr, payload, opts) if err == nil { for _, sh := range s.opts.statsHandlers { - sh.HandleRPC(stream.Context(), outPayload(false, msg, data, payload, time.Now())) + sh.HandleRPC(ctx, outPayload(false, msg, data, payload, time.Now())) } } return err @@ -1194,7 +1164,7 @@ func getChainUnaryHandler(interceptors []UnaryServerInterceptor, curr int, info } } -func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { +func (s *Server) processUnaryRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, md *MethodDesc, trInfo *traceInfo) (err error) { shs := s.opts.statsHandlers if len(shs) != 0 || trInfo != nil || channelz.IsOn() { if channelz.IsOn() { @@ -1208,7 +1178,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. IsClientStream: false, IsServerStream: false, } - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } if trInfo != nil { trInfo.tr.LazyLog(&trInfo.firstLine, false) @@ -1240,7 +1210,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if err != nil && err != io.EOF { end.Error = toRPCErr(err) } - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } if channelz.IsOn() { @@ -1262,7 +1232,6 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } } if len(binlogs) != 0 { - ctx := stream.Context() md, _ := metadata.FromIncomingContext(ctx) logEntry := &binarylog.ClientHeader{ Header: md, @@ -1348,7 +1317,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. return status.Errorf(codes.Internal, "grpc: error unmarshalling request: %v", err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), &stats.InPayload{ + sh.HandleRPC(ctx, &stats.InPayload{ RecvTime: time.Now(), Payload: v, Length: len(d), @@ -1362,7 +1331,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
Message: d, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), cm) + binlog.Log(ctx, cm) } } if trInfo != nil { @@ -1370,7 +1339,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. } return nil } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) reply, appErr := md.Handler(info.serviceImpl, ctx, df, s.opts.unaryInt) if appErr != nil { appStatus, ok := status.FromError(appErr) @@ -1395,7 +1364,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Header: h, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) + binlog.Log(ctx, sh) } } st := &binarylog.ServerTrailer{ @@ -1403,7 +1372,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return appErr @@ -1418,7 +1387,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. if stream.SendCompress() != sendCompressorName { comp = encoding.GetCompressor(stream.SendCompress()) } - if err := s.sendResponse(t, stream, reply, cp, opts, comp); err != nil { + if err := s.sendResponse(ctx, t, stream, reply, cp, opts, comp); err != nil { if err == io.EOF { // The entire stream is done (for unary RPC only). return err @@ -1445,8 +1414,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), st) + binlog.Log(ctx, sh) + binlog.Log(ctx, st) } } return err @@ -1460,8 +1429,8 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. Message: reply, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), sh) - binlog.Log(stream.Context(), sm) + binlog.Log(ctx, sh) + binlog.Log(ctx, sm) } } if channelz.IsOn() { @@ -1479,7 +1448,7 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport. 
Err: appErr, } for _, binlog := range binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return t.WriteStatus(stream, statusOK) @@ -1521,7 +1490,7 @@ func getChainStreamHandler(interceptors []StreamServerInterceptor, curr int, inf } } -func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { +func (s *Server) processStreamingRPC(ctx context.Context, t transport.ServerTransport, stream *transport.Stream, info *serviceInfo, sd *StreamDesc, trInfo *traceInfo) (err error) { if channelz.IsOn() { s.incrCallsStarted() } @@ -1535,10 +1504,10 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp IsServerStream: sd.ServerStreams, } for _, sh := range shs { - sh.HandleRPC(stream.Context(), statsBegin) + sh.HandleRPC(ctx, statsBegin) } } - ctx := NewContextWithServerTransportStream(stream.Context(), stream) + ctx = NewContextWithServerTransportStream(ctx, stream) ss := &serverStream{ ctx: ctx, t: t, @@ -1574,7 +1543,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp end.Error = toRPCErr(err) } for _, sh := range shs { - sh.HandleRPC(stream.Context(), end) + sh.HandleRPC(ctx, end) } } @@ -1616,7 +1585,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp logEntry.PeerAddr = peer.Addr } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), logEntry) + binlog.Log(ctx, logEntry) } } @@ -1694,7 +1663,7 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } t.WriteStatus(ss.s, appStatus) @@ -1712,33 +1681,50 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp Err: appErr, } for _, binlog := range ss.binlogs { - binlog.Log(stream.Context(), st) + binlog.Log(ctx, st) } } return t.WriteStatus(ss.s, statusOK) } -func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream, trInfo *traceInfo) { +func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Stream) { + ctx := stream.Context() + var ti *traceInfo + if EnableTracing { + tr := trace.New("grpc.Recv."+methodFamily(stream.Method()), stream.Method()) + ctx = trace.NewContext(ctx, tr) + ti = &traceInfo{ + tr: tr, + firstLine: firstLine{ + client: false, + remoteAddr: t.RemoteAddr(), + }, + } + if dl, ok := ctx.Deadline(); ok { + ti.firstLine.deadline = time.Until(dl) + } + } + sm := stream.Method() if sm != "" && sm[0] == '/' { sm = sm[1:] } pos := strings.LastIndex(sm, "/") if pos == -1 { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"Malformed method name %q", []any{sm}}, true) + ti.tr.SetError() } errDesc := fmt.Sprintf("malformed method name: %q", stream.Method()) if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } return } @@ -1748,17 +1734,17 @@ func (s *Server) handleStream(t 
transport.ServerTransport, stream *transport.Str srv, knownService := s.services[service] if knownService { if md, ok := srv.methods[method]; ok { - s.processUnaryRPC(t, stream, srv, md, trInfo) + s.processUnaryRPC(ctx, t, stream, srv, md, ti) return } if sd, ok := srv.streams[method]; ok { - s.processStreamingRPC(t, stream, srv, sd, trInfo) + s.processStreamingRPC(ctx, t, stream, srv, sd, ti) return } } // Unknown service, or known server unknown method. if unknownDesc := s.opts.unknownStreamDesc; unknownDesc != nil { - s.processStreamingRPC(t, stream, nil, unknownDesc, trInfo) + s.processStreamingRPC(ctx, t, stream, nil, unknownDesc, ti) return } var errDesc string @@ -1767,19 +1753,19 @@ func (s *Server) handleStream(t transport.ServerTransport, stream *transport.Str } else { errDesc = fmt.Sprintf("unknown method %v for service %v", method, service) } - if trInfo != nil { - trInfo.tr.LazyPrintf("%s", errDesc) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyPrintf("%s", errDesc) + ti.tr.SetError() } if err := t.WriteStatus(stream, status.New(codes.Unimplemented, errDesc)); err != nil { - if trInfo != nil { - trInfo.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) - trInfo.tr.SetError() + if ti != nil { + ti.tr.LazyLog(&fmtStringer{"%v", []any{err}}, true) + ti.tr.SetError() } channelz.Warningf(logger, s.channelzID, "grpc: Server.handleStream failed to write status: %v", err) } - if trInfo != nil { - trInfo.tr.Finish() + if ti != nil { + ti.tr.Finish() } } diff --git a/vendor/google.golang.org/grpc/tap/tap.go b/vendor/google.golang.org/grpc/tap/tap.go index bfa5dfa40e4d1..07f0125768808 100644 --- a/vendor/google.golang.org/grpc/tap/tap.go +++ b/vendor/google.golang.org/grpc/tap/tap.go @@ -27,6 +27,8 @@ package tap import ( "context" + + "google.golang.org/grpc/metadata" ) // Info defines the relevant information needed by the handles. @@ -34,6 +36,10 @@ type Info struct { // FullMethodName is the string of grpc method (in the format of // /package.service/method). FullMethodName string + + // Header contains the header metadata received. + Header metadata.MD + // TODO: More to be added. } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 724ad21021300..6d2cadd79a9b9 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.58.3" +const Version = "1.59.0" diff --git a/vendor/google.golang.org/grpc/vet.sh b/vendor/google.golang.org/grpc/vet.sh index bbc9e2e3c8e36..bb480f1f9cca1 100644 --- a/vendor/google.golang.org/grpc/vet.sh +++ b/vendor/google.golang.org/grpc/vet.sh @@ -93,6 +93,9 @@ git grep -l -e 'grpclog.I' --or -e 'grpclog.W' --or -e 'grpclog.E' --or -e 'grpc # - Ensure all ptypes proto packages are renamed when importing. not git grep "\(import \|^\s*\)\"github.com/golang/protobuf/ptypes/" -- "*.go" +# - Ensure all usages of grpc_testing package are renamed when importing. +not git grep "\(import \|^\s*\)\"google.golang.org/grpc/interop/grpc_testing" -- "*.go" + # - Ensure all xds proto imports are renamed to *pb or *grpc. 
git grep '"github.com/envoyproxy/go-control-plane/envoy' -- '*.go' ':(exclude)*.pb.go' | not grep -v 'pb "\|grpc "' diff --git a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go index f8f749835c24f..074154a751be3 100644 --- a/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go +++ b/vendor/google.golang.org/grpc/xds/googledirectpath/googlec2p.go @@ -47,9 +47,8 @@ import ( ) const ( - c2pScheme = "google-c2p" - c2pExperimentalScheme = "google-c2p-experimental" - c2pAuthority = "traffic-director-c2p.xds.googleapis.com" + c2pScheme = "google-c2p" + c2pAuthority = "traffic-director-c2p.xds.googleapis.com" tdURL = "dns:///directpath-pa.googleapis.com" httpReqTimeout = 10 * time.Second @@ -77,18 +76,10 @@ var ( ) func init() { - resolver.Register(c2pResolverBuilder{ - scheme: c2pScheme, - }) - // TODO(apolcyn): remove this experimental scheme before the 1.52 release - resolver.Register(c2pResolverBuilder{ - scheme: c2pExperimentalScheme, - }) + resolver.Register(c2pResolverBuilder{}) } -type c2pResolverBuilder struct { - scheme string -} +type c2pResolverBuilder struct{} func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts resolver.BuildOptions) (resolver.Resolver, error) { if t.URL.Host != "" { @@ -165,7 +156,7 @@ func (c2pResolverBuilder) Build(t resolver.Target, cc resolver.ClientConn, opts } func (b c2pResolverBuilder) Scheme() string { - return b.scheme + return c2pScheme } type c2pResolver struct { diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go index 85a081d09df55..34c3592180750 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cdsbalancer.go @@ -18,8 +18,8 @@ package cdsbalancer import ( + "context" "encoding/json" - "errors" "fmt" "google.golang.org/grpc/balancer" @@ -28,7 +28,6 @@ import ( "google.golang.org/grpc/credentials" "google.golang.org/grpc/credentials/tls/certprovider" "google.golang.org/grpc/internal/balancer/nop" - "google.golang.org/grpc/internal/buffer" xdsinternal "google.golang.org/grpc/internal/credentials/xds" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpclog" @@ -42,11 +41,13 @@ import ( ) const ( - cdsName = "cds_experimental" + cdsName = "cds_experimental" + aggregateClusterMaxDepth = 16 ) var ( - errBalancerClosed = errors.New("cds_experimental LB policy is closed") + errBalancerClosed = fmt.Errorf("cds_experimental LB policy is closed") + errExceedsMaxDepth = fmt.Errorf("aggregate cluster graph exceeds max depth (%d)", aggregateClusterMaxDepth) // newChildBalancer is a helper function to build a new cluster_resolver // balancer and will be overridden in unittests. @@ -81,22 +82,29 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal logger.Errorf("%q LB policy is needed but not registered", clusterresolver.Name) return nop.NewBalancer(cc, fmt.Errorf("%q LB policy is needed but not registered", clusterresolver.Name)) } - crParser, ok := builder.(balancer.ConfigParser) + parser, ok := builder.(balancer.ConfigParser) if !ok { // Shouldn't happen, imported Cluster Resolver builder has this method. 
logger.Errorf("%q LB policy does not implement a config parser", clusterresolver.Name) return nop.NewBalancer(cc, fmt.Errorf("%q LB policy does not implement a config parser", clusterresolver.Name)) } + + ctx, cancel := context.WithCancel(context.Background()) b := &cdsBalancer{ - bOpts: opts, - updateCh: buffer.NewUnbounded(), - closed: grpcsync.NewEvent(), - done: grpcsync.NewEvent(), - crParser: crParser, - xdsHI: xdsinternal.NewHandshakeInfo(nil, nil), + bOpts: opts, + childConfigParser: parser, + serializer: grpcsync.NewCallbackSerializer(ctx), + serializerCancel: cancel, + xdsHI: xdsinternal.NewHandshakeInfo(nil, nil), + watchers: make(map[string]*watcherState), + } + b.ccw = &ccWrapper{ + ClientConn: cc, + xdsHI: b.xdsHI, } b.logger = prefixLogger((b)) b.logger.Infof("Created") + var creds credentials.TransportCredentials switch { case opts.DialCreds != nil: @@ -108,12 +116,6 @@ func (bb) Build(cc balancer.ClientConn, opts balancer.BuildOptions) balancer.Bal b.xdsCredsInUse = true } b.logger.Infof("xDS credentials in use: %v", b.xdsCredsInUse) - b.clusterHandler = newClusterHandler(b) - b.ccw = &ccWrapper{ - ClientConn: cc, - xdsHI: b.xdsHI, - } - go b.run() return b } @@ -139,61 +141,45 @@ func (bb) ParseConfig(c json.RawMessage) (serviceconfig.LoadBalancingConfig, err return &cfg, nil } -// ccUpdate wraps a clientConn update received from gRPC (pushed from the -// xdsResolver). A valid clusterName causes the cdsBalancer to register a CDS -// watcher with the xdsClient, while a non-nil error causes it to cancel the -// existing watch and propagate the error to the underlying cluster_resolver -// balancer. -type ccUpdate struct { - clusterName string - err error -} - -type exitIdle struct{} - // cdsBalancer implements a CDS based LB policy. It instantiates a // cluster_resolver balancer to further resolve the serviceName received from // CDS, into localities and endpoints. Implements the balancer.Balancer // interface which is exposed to gRPC and implements the balancer.ClientConn // interface which is exposed to the cluster_resolver balancer. type cdsBalancer struct { - ccw *ccWrapper // ClientConn interface passed to child LB. - bOpts balancer.BuildOptions // BuildOptions passed to child LB. - updateCh *buffer.Unbounded // Channel for gRPC and xdsClient updates. - xdsClient xdsclient.XDSClient // xDS client to watch Cluster resource. - clusterHandler *clusterHandler // To watch the clusters. - childLB balancer.Balancer - logger *grpclog.PrefixLogger - closed *grpcsync.Event - done *grpcsync.Event - crParser balancer.ConfigParser + // The following fields are initialized at build time and are either + // read-only after that or provide their own synchronization, and therefore + // do not need to be guarded by a mutex. + ccw *ccWrapper // ClientConn interface passed to child LB. + bOpts balancer.BuildOptions // BuildOptions passed to child LB. + childConfigParser balancer.ConfigParser // Config parser for cluster_resolver LB policy. + xdsHI *xdsinternal.HandshakeInfo // Handshake info from security configuration. + logger *grpclog.PrefixLogger // Prefix logger for all logging. + + // The serializer and its cancel func are initialized at build time, and the + // rest of the fields here are only accessed from serializer callbacks (or + // from balancer.Balancer methods, which themselves are guaranteed to be + // mutually exclusive) and hence do not need to be guarded by a mutex. + serializer *grpcsync.CallbackSerializer // Serializes updates from gRPC and xDS client. 
+ serializerCancel context.CancelFunc // Stops the above serializer. + childLB balancer.Balancer // Child policy, built upon resolution of the cluster graph. + xdsClient xdsclient.XDSClient // xDS client to watch Cluster resources. + watchers map[string]*watcherState // Set of watchers and associated state, keyed by cluster name. + lbCfg *lbConfig // Current load balancing configuration. // The certificate providers are cached here to that they can be closed when // a new provider is to be created. cachedRoot certprovider.Provider cachedIdentity certprovider.Provider - xdsHI *xdsinternal.HandshakeInfo xdsCredsInUse bool } -// handleClientConnUpdate handles a ClientConnUpdate received from gRPC. Good -// updates lead to registration of a CDS watch. Updates with error lead to -// cancellation of existing watch and propagation of the same error to the -// cluster_resolver balancer. -func (b *cdsBalancer) handleClientConnUpdate(update *ccUpdate) { - // We first handle errors, if any, and then proceed with handling the - // update, only if the status quo has changed. - if err := update.err; err != nil { - b.handleErrorFromUpdate(err, true) - return - } - b.clusterHandler.updateRootCluster(update.clusterName) -} - // handleSecurityConfig processes the security configuration received from the // management server, creates appropriate certificate provider plugins, and // updates the HandhakeInfo which is added as an address attribute in // NewSubConn() calls. +// +// Only executed in the context of a serializer callback. func (b *cdsBalancer) handleSecurityConfig(config *xdsresource.SecurityConfig) error { // If xdsCredentials are not in use, i.e, the user did not want to get // security configuration from an xDS server, we should not be acting on the @@ -220,7 +206,7 @@ func (b *cdsBalancer) handleSecurityConfig(config *xdsresource.SecurityConfig) e // Bootstrap did not find any certificate provider configs, but the user // has specified xdsCredentials and the management server has sent down // security configuration. - return errors.New("xds: certificate_providers config missing in bootstrap file") + return fmt.Errorf("xds: certificate_providers config missing in bootstrap file") } cpc := bc.CertProviderConfigs @@ -278,220 +264,29 @@ func buildProviderFunc(configs map[string]*certprovider.BuildableConfig, instanc return provider, nil } -// handleWatchUpdate handles a watch update from the xDS Client. Good updates -// lead to clientConn updates being invoked on the underlying cluster_resolver balancer. -func (b *cdsBalancer) handleWatchUpdate(update clusterHandlerUpdate) { - if err := update.err; err != nil { - b.logger.Warningf("Watch error from xds-client %p: %v", b.xdsClient, err) - b.handleErrorFromUpdate(err, false) - return - } - - b.logger.Infof("Received Cluster resource contains content: %s, security config: %s", pretty.ToJSON(update.updates), pretty.ToJSON(update.securityCfg)) - - // Process the security config from the received update before building the - // child policy or forwarding the update to it. We do this because the child - // policy may try to create a new subConn inline. Processing the security - // configuration here and setting up the handshakeInfo will make sure that - // such attempts are handled properly. - if err := b.handleSecurityConfig(update.securityCfg); err != nil { - // If the security config is invalid, for example, if the provider - // instance is not found in the bootstrap config, we need to put the - // channel in transient failure. 
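The rewritten cdsBalancer above replaces the old update channel and run() goroutine with a grpcsync.CallbackSerializer: every update from gRPC or the xDS client is scheduled onto the serializer, so the fields described in the struct comments need no mutex. Below is a minimal, self-contained sketch of that callback-serializer idea; it illustrates the assumed behaviour only, is not the actual grpcsync implementation, and every name in it is invented for the example.

```go
package main

import (
	"context"
	"fmt"
)

// serializer runs scheduled callbacks one at a time on a single goroutine.
type serializer struct {
	callbacks chan func(context.Context)
	done      chan struct{}
}

func newSerializer(ctx context.Context) *serializer {
	s := &serializer{
		callbacks: make(chan func(context.Context), 16),
		done:      make(chan struct{}),
	}
	go func() {
		defer close(s.done)
		for {
			select {
			case cb := <-s.callbacks:
				cb(ctx) // callbacks never run concurrently with each other
			case <-ctx.Done():
				return
			}
		}
	}()
	return s
}

// Schedule queues cb; it reports false once the serializer has shut down.
func (s *serializer) Schedule(cb func(context.Context)) bool {
	select {
	case s.callbacks <- cb:
		return true
	case <-s.done:
		return false
	}
}

// Done is closed after the worker goroutine has exited.
func (s *serializer) Done() <-chan struct{} { return s.done }

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	s := newSerializer(ctx)

	ran := make(chan struct{})
	s.Schedule(func(context.Context) {
		fmt.Println("state is only touched from serializer callbacks")
		close(ran)
	})
	<-ran

	cancel()   // analogous to serializerCancel in the balancer's Close()
	<-s.Done() // wait for the worker goroutine to finish
}
```

The balancer's Close() in this diff follows the same shape: cancel the serializer's context, then block on Done() so no callback is still touching state when Close returns.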
- b.logger.Warningf("Received Cluster resource contains invalid security config: %v", err) - b.handleErrorFromUpdate(err, false) - return - } - - // The first good update from the watch API leads to the instantiation of an - // cluster_resolver balancer. Further updates/errors are propagated to the existing - // cluster_resolver balancer. - if b.childLB == nil { - childLB, err := newChildBalancer(b.ccw, b.bOpts) - if err != nil { - b.logger.Errorf("Failed to create child policy of type %s: %v", clusterresolver.Name, err) - return - } - b.childLB = childLB - b.logger.Infof("Created child policy %p of type %s", b.childLB, clusterresolver.Name) - } - - dms := make([]clusterresolver.DiscoveryMechanism, len(update.updates)) - for i, cu := range update.updates { - switch cu.ClusterType { - case xdsresource.ClusterTypeEDS: - dms[i] = clusterresolver.DiscoveryMechanism{ - Type: clusterresolver.DiscoveryMechanismTypeEDS, - Cluster: cu.ClusterName, - EDSServiceName: cu.EDSServiceName, - MaxConcurrentRequests: cu.MaxRequests, - } - if cu.LRSServerConfig == xdsresource.ClusterLRSServerSelf { - bootstrapConfig := b.xdsClient.BootstrapConfig() - parsedName := xdsresource.ParseName(cu.ClusterName) - if parsedName.Scheme == xdsresource.FederationScheme { - // Is a federation resource name, find the corresponding - // authority server config. - if cfg, ok := bootstrapConfig.Authorities[parsedName.Authority]; ok { - dms[i].LoadReportingServer = cfg.XDSServer - } - } else { - // Not a federation resource name, use the default - // authority. - dms[i].LoadReportingServer = bootstrapConfig.XDSServer - } - } - case xdsresource.ClusterTypeLogicalDNS: - dms[i] = clusterresolver.DiscoveryMechanism{ - Type: clusterresolver.DiscoveryMechanismTypeLogicalDNS, - Cluster: cu.ClusterName, - DNSHostname: cu.DNSHostName, - } - default: - b.logger.Infof("Unexpected cluster type %v when handling update from cluster handler", cu.ClusterType) - } - if envconfig.XDSOutlierDetection { - odJSON := cu.OutlierDetection - // "In the cds LB policy, if the outlier_detection field is not set in - // the Cluster resource, a "no-op" outlier_detection config will be - // generated in the corresponding DiscoveryMechanism config, with all - // fields unset." - A50 - if odJSON == nil { - // This will pick up top level defaults in Cluster Resolver - // ParseConfig, but sre and fpe will be nil still so still a - // "no-op" config. - odJSON = json.RawMessage(`{}`) - } - dms[i].OutlierDetection = odJSON - } - } - - // Prepare Cluster Resolver config, marshal into JSON, and then Parse it to - // get configuration to send downward to Cluster Resolver. - lbCfg := &clusterresolver.LBConfig{ - DiscoveryMechanisms: dms, - XDSLBPolicy: update.lbPolicy, - } - crLBCfgJSON, err := json.Marshal(lbCfg) - if err != nil { - // Shouldn't happen, since we just prepared struct. - b.logger.Errorf("cds_balancer: error marshalling prepared config: %v", lbCfg) - return - } - - var sc serviceconfig.LoadBalancingConfig - if sc, err = b.crParser.ParseConfig(crLBCfgJSON); err != nil { - b.logger.Errorf("cds_balancer: cluster_resolver config generated %v is invalid: %v", string(crLBCfgJSON), err) - return - } - - ccState := balancer.ClientConnState{ - ResolverState: xdsclient.SetClient(resolver.State{}, b.xdsClient), - BalancerConfig: sc, +// A convenience method to create a watcher for cluster `name`. It also +// registers the watch with the xDS client, and adds the newly created watcher +// to the list of watchers maintained by the LB policy. 
+func (b *cdsBalancer) createAndAddWatcherForCluster(name string) { + w := &clusterWatcher{ + name: name, + parent: b, } - if err := b.childLB.UpdateClientConnState(ccState); err != nil { - b.logger.Errorf("Encountered error when sending config {%+v} to child policy: %v", ccState, err) - } -} - -// run is a long-running goroutine which handles all updates from gRPC. All -// methods which are invoked directly by gRPC or xdsClient simply push an -// update onto a channel which is read and acted upon right here. -func (b *cdsBalancer) run() { - for { - select { - case u, ok := <-b.updateCh.Get(): - if !ok { - return - } - b.updateCh.Load() - switch update := u.(type) { - case *ccUpdate: - b.handleClientConnUpdate(update) - case exitIdle: - if b.childLB == nil { - b.logger.Errorf("Received ExitIdle with no child policy") - break - } - // This implementation assumes the child balancer supports - // ExitIdle (but still checks for the interface's existence to - // avoid a panic if not). If the child does not, no subconns - // will be connected. - if ei, ok := b.childLB.(balancer.ExitIdler); ok { - ei.ExitIdle() - } - } - case u := <-b.clusterHandler.updateChannel: - b.handleWatchUpdate(u) - case <-b.closed.Done(): - b.clusterHandler.close() - if b.childLB != nil { - b.childLB.Close() - b.childLB = nil - } - if b.cachedRoot != nil { - b.cachedRoot.Close() - } - if b.cachedIdentity != nil { - b.cachedIdentity.Close() - } - b.updateCh.Close() - b.logger.Infof("Shutdown") - b.done.Fire() - return - } - } -} - -// handleErrorFromUpdate handles both the error from parent ClientConn (from -// resolver) and the error from xds client (from the watcher). fromParent is -// true if error is from parent ClientConn. -// -// If the error is connection error, it's passed down to the child policy. -// Nothing needs to be done in CDS (e.g. it doesn't go into fallback). -// -// If the error is resource-not-found: -// - If it's from resolver, it means LDS resources were removed. The CDS watch -// should be canceled. -// - If it's from xds client, it means CDS resource were removed. The CDS -// watcher should keep watching. -// -// In both cases, the error will be forwarded to the child balancer. And if -// error is resource-not-found, the child balancer will stop watching EDS. -func (b *cdsBalancer) handleErrorFromUpdate(err error, fromParent bool) { - // This is not necessary today, because xds client never sends connection - // errors. - if fromParent && xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound { - b.clusterHandler.close() - } - if b.childLB != nil { - if xdsresource.ErrType(err) != xdsresource.ErrorTypeConnection { - // Connection errors will be sent to the child balancers directly. - // There's no need to forward them. - b.childLB.ResolverError(err) - } - } else { - // If child balancer was never created, fail the RPCs with - // errors. - b.ccw.UpdateState(balancer.State{ - ConnectivityState: connectivity.TransientFailure, - Picker: base.NewErrPicker(err), - }) + ws := &watcherState{ + watcher: w, + cancelWatch: xdsresource.WatchCluster(b.xdsClient, name, w), } + b.watchers[name] = ws } // UpdateClientConnState receives the serviceConfig (which contains the // clusterName to watch for in CDS) and the xdsClient object from the // xdsResolver. 
func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) error { - if b.closed.HasFired() { - b.logger.Errorf("Received balancer config after close") - return errBalancerClosed - } - if b.xdsClient == nil { c := xdsclient.FromResolverState(state.ResolverState) if c == nil { + b.logger.Warningf("Received balancer config with no xDS client") return balancer.ErrBadResolverState } b.xdsClient = c @@ -510,17 +305,49 @@ func (b *cdsBalancer) UpdateClientConnState(state balancer.ClientConnState) erro b.logger.Warningf("Received balancer config with no cluster name") return balancer.ErrBadResolverState } - b.updateCh.Put(&ccUpdate{clusterName: lbCfg.ClusterName}) + + // Do nothing and return early if configuration has not changed. + if b.lbCfg != nil && b.lbCfg.ClusterName == lbCfg.ClusterName { + return nil + } + b.lbCfg = lbCfg + + // Handle the update in a blocking fashion. + done := make(chan struct{}) + ok = b.serializer.Schedule(func(context.Context) { + // A config update with a changed top-level cluster name means that none + // of our old watchers make any sense any more. + b.closeAllWatchers() + + // Create a new watcher for the top-level cluster. Upon resolution, it + // could end up creating more watchers if turns out to be an aggregate + // cluster. + b.createAndAddWatcherForCluster(lbCfg.ClusterName) + close(done) + }) + if !ok { + // The call to Schedule returns false *only* if the serializer has been + // closed, which happens only when we receive an update after close. + return errBalancerClosed + } + <-done return nil } // ResolverError handles errors reported by the xdsResolver. func (b *cdsBalancer) ResolverError(err error) { - if b.closed.HasFired() { - b.logger.Warningf("Received resolver error after close: %v", err) - return - } - b.updateCh.Put(&ccUpdate{err: err}) + b.serializer.Schedule(func(context.Context) { + // Resource not found error is reported by the resolver when the + // top-level cluster resource is removed by the management server. + if xdsresource.ErrType(err) == xdsresource.ErrorTypeResourceNotFound { + b.closeAllWatchers() + } + var root string + if b.lbCfg != nil { + root = b.lbCfg.ClusterName + } + b.onClusterError(root, err) + }) } // UpdateSubConnState handles subConn updates from gRPC. @@ -528,15 +355,303 @@ func (b *cdsBalancer) UpdateSubConnState(sc balancer.SubConn, state balancer.Sub b.logger.Errorf("UpdateSubConnState(%v, %+v) called unexpectedly", sc, state) } +// Closes all registered cluster wathers and removes them from the internal map. +// +// Only executed in the context of a serializer callback. +func (b *cdsBalancer) closeAllWatchers() { + for name, state := range b.watchers { + state.cancelWatch() + delete(b.watchers, name) + } +} + // Close cancels the CDS watch, closes the child policy and closes the // cdsBalancer. 
func (b *cdsBalancer) Close() { - b.closed.Fire() - <-b.done.Done() + b.serializer.Schedule(func(ctx context.Context) { + b.closeAllWatchers() + + if b.childLB != nil { + b.childLB.Close() + b.childLB = nil + } + if b.cachedRoot != nil { + b.cachedRoot.Close() + } + if b.cachedIdentity != nil { + b.cachedIdentity.Close() + } + b.logger.Infof("Shutdown") + }) + b.serializerCancel() + <-b.serializer.Done() } func (b *cdsBalancer) ExitIdle() { - b.updateCh.Put(exitIdle{}) + b.serializer.Schedule(func(context.Context) { + if b.childLB == nil { + b.logger.Warningf("Received ExitIdle with no child policy") + return + } + // This implementation assumes the child balancer supports + // ExitIdle (but still checks for the interface's existence to + // avoid a panic if not). If the child does not, no subconns + // will be connected. + if ei, ok := b.childLB.(balancer.ExitIdler); ok { + ei.ExitIdle() + } + }) +} + +// Handles a good Cluster update from the xDS client. Kicks off the discovery +// mechanism generation process from the top-level cluster and if the cluster +// graph is resolved, generates child policy config and pushes it down. +// +// Only executed in the context of a serializer callback. +func (b *cdsBalancer) onClusterUpdate(name string, update xdsresource.ClusterUpdate) { + state := b.watchers[name] + if state == nil { + // We are currently not watching this cluster anymore. Return early. + return + } + + b.logger.Infof("Received Cluster resource: %s", pretty.ToJSON(update)) + + // Update the watchers map with the update for the cluster. + state.lastUpdate = &update + + // For an aggregate cluster, always use the security configuration on the + // root cluster. + if name == b.lbCfg.ClusterName { + // Process the security config from the received update before building the + // child policy or forwarding the update to it. We do this because the child + // policy may try to create a new subConn inline. Processing the security + // configuration here and setting up the handshakeInfo will make sure that + // such attempts are handled properly. + if err := b.handleSecurityConfig(update.SecurityCfg); err != nil { + // If the security config is invalid, for example, if the provider + // instance is not found in the bootstrap config, we need to put the + // channel in transient failure. + b.onClusterError(name, fmt.Errorf("received Cluster resource contains invalid security config: %v", err)) + return + } + } + + clustersSeen := make(map[string]bool) + dms, ok, err := b.generateDMsForCluster(b.lbCfg.ClusterName, 0, nil, clustersSeen) + if err != nil { + b.onClusterError(b.lbCfg.ClusterName, fmt.Errorf("failed to generate discovery mechanisms: %v", err)) + return + } + if ok { + if len(dms) == 0 { + b.onClusterError(b.lbCfg.ClusterName, fmt.Errorf("aggregate cluster graph has no leaf clusters")) + return + } + // Child policy is built the first time we resolve the cluster graph. + if b.childLB == nil { + childLB, err := newChildBalancer(b.ccw, b.bOpts) + if err != nil { + b.logger.Errorf("Failed to create child policy of type %s: %v", clusterresolver.Name, err) + return + } + b.childLB = childLB + b.logger.Infof("Created child policy %p of type %s", b.childLB, clusterresolver.Name) + } + + // Prepare the child policy configuration, convert it to JSON, have it + // parsed by the child policy to convert it into service config and push + // an update to it. + childCfg := &clusterresolver.LBConfig{ + DiscoveryMechanisms: dms, + // The LB policy is configured by the root cluster. 
+ XDSLBPolicy: b.watchers[b.lbCfg.ClusterName].lastUpdate.LBPolicy, + } + cfgJSON, err := json.Marshal(childCfg) + if err != nil { + // Shouldn't happen, since we just prepared struct. + b.logger.Errorf("cds_balancer: error marshalling prepared config: %v", childCfg) + return + } + + var sc serviceconfig.LoadBalancingConfig + if sc, err = b.childConfigParser.ParseConfig(cfgJSON); err != nil { + b.logger.Errorf("cds_balancer: cluster_resolver config generated %v is invalid: %v", string(cfgJSON), err) + return + } + + ccState := balancer.ClientConnState{ + ResolverState: xdsclient.SetClient(resolver.State{}, b.xdsClient), + BalancerConfig: sc, + } + if err := b.childLB.UpdateClientConnState(ccState); err != nil { + b.logger.Errorf("Encountered error when sending config {%+v} to child policy: %v", ccState, err) + } + } + // We no longer need the clusters that we did not see in this iteration of + // generateDMsForCluster(). + for cluster := range clustersSeen { + state, ok := b.watchers[cluster] + if ok { + continue + } + state.cancelWatch() + delete(b.watchers, cluster) + } +} + +// Handles an error Cluster update from the xDS client. Propagates the error +// down to the child policy if one exists, or puts the channel in +// TRANSIENT_FAILURE. +// +// Only executed in the context of a serializer callback. +func (b *cdsBalancer) onClusterError(name string, err error) { + b.logger.Warningf("Cluster resource %q received error update: %v", name, err) + + if b.childLB != nil { + if xdsresource.ErrType(err) != xdsresource.ErrorTypeConnection { + // Connection errors will be sent to the child balancers directly. + // There's no need to forward them. + b.childLB.ResolverError(err) + } + } else { + // If child balancer was never created, fail the RPCs with + // errors. + b.ccw.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(fmt.Errorf("%q: %v", name, err)), + }) + } +} + +// Handles a resource-not-found error from the xDS client. Propagates the error +// down to the child policy if one exists, or puts the channel in +// TRANSIENT_FAILURE. +// +// Only executed in the context of a serializer callback. +func (b *cdsBalancer) onClusterResourceNotFound(name string) { + err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type Cluster not found in received response", name) + if b.childLB != nil { + b.childLB.ResolverError(err) + } else { + // If child balancer was never created, fail the RPCs with errors. + b.ccw.UpdateState(balancer.State{ + ConnectivityState: connectivity.TransientFailure, + Picker: base.NewErrPicker(err), + }) + } +} + +// Generates discovery mechanisms for the cluster graph rooted at `name`. This +// method is called recursively if `name` corresponds to an aggregate cluster, +// with the base case for recursion being a leaf cluster. If a new cluster is +// encountered when traversing the graph, a watcher is created for it. +// +// Inputs: +// - name: name of the cluster to start from +// - depth: recursion depth of the current cluster, starting from root +// - dms: prioritized list of current discovery mechanisms +// - clustersSeen: cluster names seen so far in the graph traversal +// +// Outputs: +// - new prioritized list of discovery mechanisms +// - boolean indicating if traversal of the aggregate cluster graph is +// complete. If false, the above list of discovery mechanisms is ignored. +// - error indicating if any error was encountered as part of the graph +// traversal. 
If error is non-nil, the other return values are ignored. +// +// Only executed in the context of a serializer callback. +func (b *cdsBalancer) generateDMsForCluster(name string, depth int, dms []clusterresolver.DiscoveryMechanism, clustersSeen map[string]bool) ([]clusterresolver.DiscoveryMechanism, bool, error) { + if depth >= aggregateClusterMaxDepth { + return dms, false, errExceedsMaxDepth + } + + if clustersSeen[name] { + // Discovery mechanism already seen through a different branch. + return dms, true, nil + } + clustersSeen[name] = true + + state, ok := b.watchers[name] + if !ok { + // If we have not seen this cluster so far, create a watcher for it, add + // it to the map, start the watch and return. + b.createAndAddWatcherForCluster(name) + + // And since we just created the watcher, we know that we haven't + // resolved the cluster graph yet. + return dms, false, nil + } + + // A watcher exists, but no update has been received yet. + if state.lastUpdate == nil { + return dms, false, nil + } + + var dm clusterresolver.DiscoveryMechanism + cluster := state.lastUpdate + switch cluster.ClusterType { + case xdsresource.ClusterTypeAggregate: + // This boolean is used to track if any of the clusters in the graph is + // not yet completely resolved or returns errors, thereby allowing us to + // traverse as much of the graph as possible (and start the associated + // watches where required) to ensure that clustersSeen contains all + // clusters in the graph that we can traverse to. + missingCluster := false + var err error + for _, child := range cluster.PrioritizedClusterNames { + var ok bool + dms, ok, err = b.generateDMsForCluster(child, depth+1, dms, clustersSeen) + if err != nil || !ok { + missingCluster = true + } + } + return dms, !missingCluster, err + case xdsresource.ClusterTypeEDS: + dm = clusterresolver.DiscoveryMechanism{ + Type: clusterresolver.DiscoveryMechanismTypeEDS, + Cluster: cluster.ClusterName, + EDSServiceName: cluster.EDSServiceName, + MaxConcurrentRequests: cluster.MaxRequests, + } + if cluster.LRSServerConfig == xdsresource.ClusterLRSServerSelf { + bootstrapConfig := b.xdsClient.BootstrapConfig() + parsedName := xdsresource.ParseName(cluster.ClusterName) + if parsedName.Scheme == xdsresource.FederationScheme { + // Is a federation resource name, find the corresponding + // authority server config. + if cfg, ok := bootstrapConfig.Authorities[parsedName.Authority]; ok { + dm.LoadReportingServer = cfg.XDSServer + } + } else { + // Not a federation resource name, use the default + // authority. + dm.LoadReportingServer = bootstrapConfig.XDSServer + } + } + case xdsresource.ClusterTypeLogicalDNS: + dm = clusterresolver.DiscoveryMechanism{ + Type: clusterresolver.DiscoveryMechanismTypeLogicalDNS, + Cluster: cluster.ClusterName, + DNSHostname: cluster.DNSHostName, + } + } + if envconfig.XDSOutlierDetection { + odJSON := cluster.OutlierDetection + // "In the cds LB policy, if the outlier_detection field is not set in + // the Cluster resource, a "no-op" outlier_detection config will be + // generated in the corresponding DiscoveryMechanism config, with all + // fields unset." - A50 + if odJSON == nil { + // This will pick up top level defaults in Cluster Resolver + // ParseConfig, but sre and fpe will be nil still so still a + // "no-op" config. 
+ odJSON = json.RawMessage(`{}`) + } + dm.OutlierDetection = odJSON + } + + return append(dms, dm), true, nil } // ccWrapper wraps the balancer.ClientConn passed to the CDS balancer at diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go deleted file mode 100644 index aa2d9674a7904..0000000000000 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_handler.go +++ /dev/null @@ -1,368 +0,0 @@ -/* - * Copyright 2021 gRPC authors. - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package cdsbalancer - -import ( - "encoding/json" - "errors" - "sync" - - "google.golang.org/grpc/xds/internal/xdsclient" - "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" -) - -const maxDepth = 16 - -var ( - errNotReceivedUpdate = errors.New("tried to construct a cluster update on a cluster that has not received an update") - errExceedsMaxDepth = errors.New("aggregate cluster graph exceeds max depth") -) - -// clusterHandlerUpdate wraps the information received from the registered CDS -// watcher. A non-nil error is propagated to the underlying cluster_resolver -// balancer. A valid update results in creating a new cluster_resolver balancer -// (if one doesn't already exist) and pushing the update to it. -type clusterHandlerUpdate struct { - // securityCfg is the Security Config from the top (root) cluster. - securityCfg *xdsresource.SecurityConfig - - // lbPolicy is the the child of the cluster_impl policy, for all priorities. - lbPolicy json.RawMessage - - // updates is a list of ClusterUpdates from all the leaf clusters. - updates []xdsresource.ClusterUpdate - err error -} - -// clusterHandler will be given a name representing a cluster. It will then -// update the CDS policy constantly with a list of Clusters to pass down to -// XdsClusterResolverLoadBalancingPolicyConfig in a stream like fashion. -type clusterHandler struct { - parent *cdsBalancer - - // A mutex to protect entire tree of clusters. - clusterMutex sync.Mutex - rootClusterName string - - createdClusters map[string]*clusterNode - - // A way to ping CDS Balancer about any updates or errors to a Node in the - // tree. This will either get called from this handler constructing an - // update or from a child with an error. Capacity of one as the only update - // CDS Balancer cares about is the most recent update. - updateChannel chan clusterHandlerUpdate -} - -func newClusterHandler(parent *cdsBalancer) *clusterHandler { - return &clusterHandler{ - parent: parent, - updateChannel: make(chan clusterHandlerUpdate, 1), - createdClusters: make(map[string]*clusterNode), - } -} - -func (ch *clusterHandler) updateRootCluster(rootClusterName string) { - ch.clusterMutex.Lock() - defer ch.clusterMutex.Unlock() - if ch.createdClusters[ch.rootClusterName] == nil { - // Construct a root node on first update. 
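generateDMsForCluster above walks the aggregate-cluster graph depth-first with a depth cap (aggregateClusterMaxDepth) and a clustersSeen set, flattening the leaf clusters into a prioritized list of discovery mechanisms. The sketch below shows the same traversal shape with hypothetical stand-in types instead of the real xDS resources; the watcher-creation side effect of the real code is only hinted at in a comment.

```go
package main

import (
	"errors"
	"fmt"
)

const maxDepth = 16

var errExceedsMaxDepth = errors.New("aggregate cluster graph exceeds max depth")

// cluster is a stand-in for the ClusterUpdate a watcher would have cached.
type cluster struct {
	name     string
	children []string // non-empty means this is an aggregate cluster
}

// flatten returns the leaf clusters under root in priority order. The boolean
// mirrors the "graph fully resolved" return value: it is false when a cluster
// has no cached update yet (here: missing from the map).
func flatten(clusters map[string]cluster, root string, depth int, out []string, seen map[string]bool) ([]string, bool, error) {
	if depth >= maxDepth {
		return out, false, errExceedsMaxDepth
	}
	if seen[root] {
		// Already reached through a different branch; duplicates are ignored.
		return out, true, nil
	}
	seen[root] = true

	c, ok := clusters[root]
	if !ok {
		// No update for this cluster yet; in the real code this is where a
		// new watcher would be created before reporting "not resolved yet".
		return out, false, nil
	}
	if len(c.children) == 0 {
		return append(out, c.name), true, nil
	}

	resolved := true
	var err error
	for _, child := range c.children {
		var ok bool
		out, ok, err = flatten(clusters, child, depth+1, out, seen)
		if err != nil || !ok {
			resolved = false
		}
	}
	return out, resolved, err
}

func main() {
	clusters := map[string]cluster{
		"root": {name: "root", children: []string{"a", "b", "a"}}, // duplicate child is ignored
		"a":    {name: "a"},
		"b":    {name: "b"},
	}
	leaves, ok, err := flatten(clusters, "root", 0, nil, map[string]bool{})
	fmt.Println(leaves, ok, err) // [a b] true <nil>
}
```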
- createClusterNode(rootClusterName, ch.parent.xdsClient, ch, 0) - ch.rootClusterName = rootClusterName - return - } - // Check if root cluster was changed. If it was, delete old one and start - // new one, if not do nothing. - if rootClusterName != ch.rootClusterName { - ch.createdClusters[ch.rootClusterName].delete() - createClusterNode(rootClusterName, ch.parent.xdsClient, ch, 0) - ch.rootClusterName = rootClusterName - } -} - -// This function tries to construct a cluster update to send to CDS. -func (ch *clusterHandler) constructClusterUpdate() { - if ch.createdClusters[ch.rootClusterName] == nil { - // If root is nil, this handler is closed, ignore the update. - return - } - clusterUpdate, err := ch.createdClusters[ch.rootClusterName].constructClusterUpdate(make(map[string]bool)) - if err != nil { - // If there was an error received no op, as this can mean one of the - // children hasn't received an update yet, or the graph continued to - // stay in an error state. If the graph continues to stay in an error - // state, no new error needs to be written to the update buffer as that - // would be redundant information. - return - } - if clusterUpdate == nil { - // This means that there was an aggregated cluster with no EDS or DNS as - // leaf nodes. No update to be written. - return - } - // For a ClusterUpdate, the only update CDS cares about is the most - // recent one, so opportunistically drain the update channel before - // sending the new update. - select { - case <-ch.updateChannel: - default: - } - - ch.updateChannel <- clusterHandlerUpdate{ - securityCfg: ch.createdClusters[ch.rootClusterName].clusterUpdate.SecurityCfg, - lbPolicy: ch.createdClusters[ch.rootClusterName].clusterUpdate.LBPolicy, - updates: clusterUpdate, - } -} - -// close() is meant to be called by CDS when the CDS balancer is closed, and it -// cancels the watches for every cluster in the cluster tree. -func (ch *clusterHandler) close() { - ch.clusterMutex.Lock() - defer ch.clusterMutex.Unlock() - if ch.createdClusters[ch.rootClusterName] == nil { - return - } - ch.createdClusters[ch.rootClusterName].delete() - ch.rootClusterName = "" -} - -// This logically represents a cluster. This handles all the logic for starting -// and stopping a cluster watch, handling any updates, and constructing a list -// recursively for the ClusterHandler. -type clusterNode struct { - // A way to cancel the watch for the cluster. - cancelFunc func() - - // A list of children, as the Node can be an aggregate Cluster. - children []string - - // A ClusterUpdate in order to build a list of cluster updates for CDS to - // send down to child XdsClusterResolverLoadBalancingPolicy. - clusterUpdate xdsresource.ClusterUpdate - - // This boolean determines whether this Node has received an update or not. - // This isn't the best practice, but this will protect a list of Cluster - // Updates from being constructed if a cluster in the tree has not received - // an update yet. - receivedUpdate bool - - clusterHandler *clusterHandler - - depth int32 - refCount int32 - - // maxDepthErr is set if this cluster node is an aggregate cluster and has a - // child that causes the graph to exceed the maximum depth allowed. This is - // used to show a cluster graph as being in an error state when it constructs - // a cluster update. - maxDepthErr error -} - -// CreateClusterNode creates a cluster node from a given clusterName. This will -// also start the watch for that cluster. 
-func createClusterNode(clusterName string, xdsClient xdsclient.XDSClient, topLevelHandler *clusterHandler, depth int32) { - // If the cluster has already been created, simply return, which ignores - // duplicates. - if topLevelHandler.createdClusters[clusterName] != nil { - topLevelHandler.createdClusters[clusterName].refCount++ - return - } - c := &clusterNode{ - clusterHandler: topLevelHandler, - depth: depth, - refCount: 1, - } - // Communicate with the xds client here. - topLevelHandler.parent.logger.Infof("CDS watch started on %v", clusterName) - cancel := xdsClient.WatchCluster(clusterName, c.handleResp) - c.cancelFunc = func() { - topLevelHandler.parent.logger.Infof("CDS watch canceled on %v", clusterName) - cancel() - } - topLevelHandler.createdClusters[clusterName] = c -} - -// This function cancels the cluster watch on the cluster and all of it's -// children. -func (c *clusterNode) delete() { - c.refCount-- - if c.refCount == 0 { - c.cancelFunc() - delete(c.clusterHandler.createdClusters, c.clusterUpdate.ClusterName) - for _, child := range c.children { - if c.clusterHandler.createdClusters[child] != nil { - c.clusterHandler.createdClusters[child].delete() - } - } - } -} - -// Construct cluster update (potentially a list of ClusterUpdates) for a node. -func (c *clusterNode) constructClusterUpdate(clustersSeen map[string]bool) ([]xdsresource.ClusterUpdate, error) { - // If the cluster has not yet received an update, the cluster update is not - // yet ready. - if !c.receivedUpdate { - return nil, errNotReceivedUpdate - } - if c.maxDepthErr != nil { - return nil, c.maxDepthErr - } - // Ignore duplicates. It's ok to ignore duplicates because the second - // occurrence of a cluster will never be used. I.e. in [C, D, C], the second - // C will never be used (the only way to fall back to lower priority D is if - // C is down, which means second C will never be chosen). Thus, [C, D, C] is - // logically equivalent to [C, D]. - if clustersSeen[c.clusterUpdate.ClusterName] { - return []xdsresource.ClusterUpdate{}, nil - } - clustersSeen[c.clusterUpdate.ClusterName] = true - - // Base case - LogicalDNS or EDS. Both of these cluster types will be tied - // to a single ClusterUpdate. - if c.clusterUpdate.ClusterType != xdsresource.ClusterTypeAggregate { - return []xdsresource.ClusterUpdate{c.clusterUpdate}, nil - } - - // If an aggregate construct a list by recursively calling down to all of - // it's children. - var childrenUpdates []xdsresource.ClusterUpdate - for _, child := range c.children { - childUpdateList, err := c.clusterHandler.createdClusters[child].constructClusterUpdate(clustersSeen) - if err != nil { - return nil, err - } - childrenUpdates = append(childrenUpdates, childUpdateList...) - } - return childrenUpdates, nil -} - -// handleResp handles a xds response for a particular cluster. This function -// also handles any logic with regards to any child state that may have changed. -// At the end of the handleResp(), the clusterUpdate will be pinged in certain -// situations to try and construct an update to send back to CDS. -func (c *clusterNode) handleResp(clusterUpdate xdsresource.ClusterUpdate, err error) { - c.clusterHandler.clusterMutex.Lock() - defer c.clusterHandler.clusterMutex.Unlock() - if err != nil { // Write this error for run() to pick up in CDS LB policy. - // For a ClusterUpdate, the only update CDS cares about is the most - // recent one, so opportunistically drain the update channel before - // sending the new update. 
- select { - case <-c.clusterHandler.updateChannel: - default: - } - c.clusterHandler.updateChannel <- clusterHandlerUpdate{err: err} - c.receivedUpdate = false - c.maxDepthErr = nil - return - } - - c.receivedUpdate = true - c.clusterUpdate = clusterUpdate - - // If the cluster was a leaf node, if the cluster update received had change - // in the cluster update then the overall cluster update would change and - // there is a possibility for the overall update to build so ping cluster - // handler to return. Also, if there was any children from previously, - // delete the children, as the cluster type is no longer an aggregate - // cluster. - if clusterUpdate.ClusterType != xdsresource.ClusterTypeAggregate { - for _, child := range c.children { - c.clusterHandler.createdClusters[child].delete() - } - c.children = nil - c.maxDepthErr = nil - // This is an update in the one leaf node, should try to send an update - // to the parent CDS balancer. - // - // Note that this update might be a duplicate from the previous one. - // Because the update contains not only the cluster name to watch, but - // also the extra fields (e.g. security config). There's no good way to - // compare all the fields. - c.clusterHandler.constructClusterUpdate() - return - } - - // Aggregate cluster handling. - if len(clusterUpdate.PrioritizedClusterNames) >= 1 { - if c.depth == maxDepth-1 { - // For a ClusterUpdate, the only update CDS cares about is the most - // recent one, so opportunistically drain the update channel before - // sending the new update. - select { - case <-c.clusterHandler.updateChannel: - default: - } - c.clusterHandler.updateChannel <- clusterHandlerUpdate{err: errExceedsMaxDepth} - c.children = []string{} - c.maxDepthErr = errExceedsMaxDepth - return - } - } - - newChildren := make(map[string]bool) - for _, childName := range clusterUpdate.PrioritizedClusterNames { - newChildren[childName] = true - } - - // These booleans help determine whether this callback will ping the overall - // clusterHandler to try and construct an update to send back to CDS. This - // will be determined by whether there would be a change in the overall - // clusterUpdate for the whole tree (ex. change in clusterUpdate for current - // cluster or a deleted child) and also if there's even a possibility for - // the update to build (ex. if a child is created and a watch is started, - // that child hasn't received an update yet due to the mutex lock on this - // callback). - var createdChild bool - - // This map will represent the current children of the cluster. It will be - // first added to in order to represent the new children. It will then have - // any children deleted that are no longer present. - mapCurrentChildren := make(map[string]bool) - for _, child := range c.children { - mapCurrentChildren[child] = true - } - - // Add and construct any new child nodes. - for child := range newChildren { - if _, inChildrenAlready := mapCurrentChildren[child]; !inChildrenAlready { - createClusterNode(child, c.clusterHandler.parent.xdsClient, c.clusterHandler, c.depth+1) - } - } - - // Delete any child nodes no longer in the aggregate cluster's children. 
- for child := range mapCurrentChildren { - if _, stillAChild := newChildren[child]; !stillAChild { - c.clusterHandler.createdClusters[child].delete() - delete(mapCurrentChildren, child) - } - } - - c.children = clusterUpdate.PrioritizedClusterNames - - c.maxDepthErr = nil - // If the cluster is an aggregate cluster, if this callback created any new - // child cluster nodes, then there's no possibility for a full cluster - // update to successfully build, as those created children will not have - // received an update yet. Even if this update did not delete a child, there - // is still a possibility for the cluster update to build, as the aggregate - // cluster can ignore duplicated children and thus the update can fill out - // the full cluster update tree. - if !createdChild { - c.clusterHandler.constructClusterUpdate() - } -} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go new file mode 100644 index 0000000000000..0b0d168376d74 --- /dev/null +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/cdsbalancer/cluster_watcher.go @@ -0,0 +1,58 @@ +/* + * Copyright 2023 gRPC authors. + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package cdsbalancer + +import ( + "context" + + "google.golang.org/grpc/xds/internal/xdsclient/xdsresource" +) + +// clusterWatcher implements the xdsresource.ClusterWatcher interface, and is +// passed to the xDS client as part of the WatchResource() API. +// +// It watches a single cluster and handles callbacks from the xDS client by +// scheduling them on the parent LB policy's serializer. +type clusterWatcher struct { + name string + parent *cdsBalancer +} + +func (cw *clusterWatcher) OnUpdate(u *xdsresource.ClusterResourceData) { + cw.parent.serializer.Schedule(func(context.Context) { + cw.parent.onClusterUpdate(cw.name, u.Resource) + }) +} + +func (cw *clusterWatcher) OnError(err error) { + cw.parent.serializer.Schedule(func(context.Context) { + cw.parent.onClusterError(cw.name, err) + }) +} + +func (cw *clusterWatcher) OnResourceDoesNotExist() { + cw.parent.serializer.Schedule(func(context.Context) { + cw.parent.onClusterResourceNotFound(cw.name) + }) +} + +// watcherState groups the state associated with a clusterWatcher. +type watcherState struct { + watcher *clusterWatcher // The underlying watcher. + cancelWatch func() // Cancel func to cancel the watch. + lastUpdate *xdsresource.ClusterUpdate // Most recent update received for this cluster. 
+} diff --git a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go index b9a81e9ba8293..151c54dae6d09 100644 --- a/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go +++ b/vendor/google.golang.org/grpc/xds/internal/balancer/clusterresolver/resource_resolver.go @@ -200,7 +200,7 @@ func (rr *resourceResolver) updateMechanisms(mechanisms []DiscoveryMechanism) { for dm, r := range rr.childrenMap { if !newDMs[dm] { delete(rr.childrenMap, dm) - r.r.stop() + go r.r.stop() } } // Regenerate even if there's no change in discovery mechanism, in case diff --git a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go index 02470ddca5e45..06f6a47519c41 100644 --- a/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go +++ b/vendor/google.golang.org/grpc/xds/internal/resolver/serviceconfig.go @@ -31,6 +31,7 @@ import ( "google.golang.org/grpc/codes" "google.golang.org/grpc/internal/envconfig" "google.golang.org/grpc/internal/grpcrand" + "google.golang.org/grpc/internal/grpcutil" iresolver "google.golang.org/grpc/internal/resolver" "google.golang.org/grpc/internal/serviceconfig" "google.golang.org/grpc/internal/wrr" @@ -229,19 +230,30 @@ func retryConfigToPolicy(config *xdsresource.RetryConfig) *serviceconfig.RetryPo func (cs *configSelector) generateHash(rpcInfo iresolver.RPCInfo, hashPolicies []*xdsresource.HashPolicy) uint64 { var hash uint64 var generatedHash bool + var md, emd metadata.MD + var mdRead bool for _, policy := range hashPolicies { var policyHash uint64 var generatedPolicyHash bool switch policy.HashPolicyType { case xdsresource.HashPolicyTypeHeader: - md, ok := metadata.FromOutgoingContext(rpcInfo.Context) - if !ok { + if strings.HasSuffix(policy.HeaderName, "-bin") { continue } - values := md.Get(policy.HeaderName) - // If the header isn't present, no-op. + if !mdRead { + md, _ = metadata.FromOutgoingContext(rpcInfo.Context) + emd, _ = grpcutil.ExtraMetadata(rpcInfo.Context) + mdRead = true + } + values := emd.Get(policy.HeaderName) if len(values) == 0 { - continue + // Extra metadata (e.g. the "content-type" header) takes + // precedence over the user's metadata. + values = md.Get(policy.HeaderName) + if len(values) == 0 { + // If the header isn't present at all, this policy is a no-op. + continue + } } joinedValues := strings.Join(values, ",") if policy.Regex != nil { diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go index 44f6d3bc0a1cf..542c5e025fd1b 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/client.go @@ -32,7 +32,6 @@ import ( type XDSClient interface { WatchListener(string, func(xdsresource.ListenerUpdate, error)) func() WatchRouteConfig(string, func(xdsresource.RouteConfigUpdate, error)) func() - WatchCluster(string, func(xdsresource.ClusterUpdate, error)) func() // WatchResource uses xDS to discover the resource associated with the // provided resource name. 
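The new cluster_watcher.go added above is a thin adapter: each xDS callback is wrapped in a closure and handed to the parent balancer's serializer, keeping all state access on one goroutine. A compact sketch of that adapter shape, with hypothetical interfaces standing in for xdsresource.ClusterWatcher and the real serializer:

```go
package main

import "fmt"

// watcher is a stand-in for the callback interface handed to an xDS client.
type watcher interface {
	OnUpdate(update string)
	OnError(err error)
}

// parent is a stand-in for the LB policy; schedule serializes callbacks.
type parent struct {
	schedule func(func()) // e.g. the serializer's Schedule from the earlier sketch
}

func (p *parent) onClusterUpdate(name, update string) { fmt.Println("update for", name+":", update) }
func (p *parent) onClusterError(name string, err error) { fmt.Println("error for", name+":", err) }

// clusterWatcher forwards every callback onto the parent's serializer.
type clusterWatcher struct {
	name   string
	parent *parent
}

func (cw *clusterWatcher) OnUpdate(update string) {
	cw.parent.schedule(func() { cw.parent.onClusterUpdate(cw.name, update) })
}

func (cw *clusterWatcher) OnError(err error) {
	cw.parent.schedule(func() { cw.parent.onClusterError(cw.name, err) })
}

func main() {
	// Run callbacks inline here; the real code hands them to a serializer.
	p := &parent{schedule: func(f func()) { f() }}
	var w watcher = &clusterWatcher{name: "cluster-a", parent: p}
	w.OnUpdate("resolved")
	w.OnError(fmt.Errorf("resource removed"))
}
```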
The resource type implementation determines how diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go index e503349dbc29a..5866221e2696d 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/clientimpl_watchers.go @@ -81,37 +81,6 @@ func (c *clientImpl) WatchRouteConfig(resourceName string, cb func(xdsresource.R return xdsresource.WatchRouteConfig(c, resourceName, watcher) } -// This is only required temporarily, while we modify the -// clientImpl.WatchCluster API to be implemented via the wrapper WatchCluster() -// API which calls the WatchResource() API. -type clusterWatcher struct { - resourceName string - cb func(xdsresource.ClusterUpdate, error) -} - -func (c *clusterWatcher) OnUpdate(update *xdsresource.ClusterResourceData) { - c.cb(update.Resource, nil) -} - -func (c *clusterWatcher) OnError(err error) { - c.cb(xdsresource.ClusterUpdate{}, err) -} - -func (c *clusterWatcher) OnResourceDoesNotExist() { - err := xdsresource.NewErrorf(xdsresource.ErrorTypeResourceNotFound, "resource name %q of type Cluster not found in received response", c.resourceName) - c.cb(xdsresource.ClusterUpdate{}, err) -} - -// WatchCluster uses CDS to discover information about the Cluster resource -// identified by resourceName. -// -// WatchCluster can be called multiple times, with same or different -// clusterNames. Each call will start an independent watcher for the resource. -func (c *clientImpl) WatchCluster(resourceName string, cb func(xdsresource.ClusterUpdate, error)) (cancel func()) { - watcher := &clusterWatcher{resourceName: resourceName, cb: cb} - return xdsresource.WatchCluster(c, resourceName, watcher) -} - // WatchResource uses xDS to discover the resource associated with the provided // resource name. The resource type implementation determines how xDS requests // are sent out and how responses are deserialized and validated. Upon receipt diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go index 89ffc4fcec661..4b8ca29ce93f3 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/loadreport.go @@ -25,6 +25,7 @@ import ( "time" "github.com/golang/protobuf/ptypes" + "google.golang.org/grpc/internal/backoff" "google.golang.org/grpc/internal/grpcsync" "google.golang.org/grpc/internal/pretty" "google.golang.org/grpc/xds/internal" @@ -100,54 +101,36 @@ func (t *Transport) lrsRunner(ctx context.Context) { node := proto.Clone(t.nodeProto).(*v3corepb.Node) node.ClientFeatures = append(node.ClientFeatures, "envoy.lrs.supports_send_all_clusters") - backoffAttempt := 0 - backoffTimer := time.NewTimer(0) - for ctx.Err() == nil { - select { - case <-backoffTimer.C: - case <-ctx.Done(): - backoffTimer.Stop() - return + runLoadReportStream := func() error { + // streamCtx is created and canceled in case we terminate the stream + // early for any reason, to avoid gRPC-Go leaking the RPC's monitoring + // goroutine. 
+ streamCtx, cancel := context.WithCancel(ctx) + defer cancel() + stream, err := v3lrsgrpc.NewLoadReportingServiceClient(t.cc).StreamLoadStats(streamCtx) + if err != nil { + t.logger.Warningf("Creating LRS stream to server %q failed: %v", t.serverURI, err) + return nil } + t.logger.Infof("Created LRS stream to server %q", t.serverURI) - // We reset backoff state when we successfully receive at least one - // message from the server. - resetBackoff := func() bool { - // streamCtx is created and canceled in case we terminate the stream - // early for any reason, to avoid gRPC-Go leaking the RPC's monitoring - // goroutine. - streamCtx, cancel := context.WithCancel(ctx) - defer cancel() - stream, err := v3lrsgrpc.NewLoadReportingServiceClient(t.cc).StreamLoadStats(streamCtx) - if err != nil { - t.logger.Warningf("Creating LRS stream to server %q failed: %v", t.serverURI, err) - return false - } - t.logger.Infof("Created LRS stream to server %q", t.serverURI) - - if err := t.sendFirstLoadStatsRequest(stream, node); err != nil { - t.logger.Warningf("Sending first LRS request failed: %v", err) - return false - } - - clusters, interval, err := t.recvFirstLoadStatsResponse(stream) - if err != nil { - t.logger.Warningf("Reading from LRS stream failed: %v", err) - return false - } - - t.sendLoads(streamCtx, stream, clusters, interval) - return true - }() + if err := t.sendFirstLoadStatsRequest(stream, node); err != nil { + t.logger.Warningf("Sending first LRS request failed: %v", err) + return nil + } - if resetBackoff { - backoffTimer.Reset(0) - backoffAttempt = 0 - } else { - backoffTimer.Reset(t.backoff(backoffAttempt)) - backoffAttempt++ + clusters, interval, err := t.recvFirstLoadStatsResponse(stream) + if err != nil { + t.logger.Warningf("Reading from LRS stream failed: %v", err) + return nil } + + // We reset backoff state when we successfully receive at least one + // message from the server. + t.sendLoads(streamCtx, stream, clusters, interval) + return backoff.ErrResetBackoff } + backoff.RunF(ctx, runLoadReportStream, t.backoff) } func (t *Transport) sendLoads(ctx context.Context, stream lrsStream, clusterNames []string, interval time.Duration) { diff --git a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go index 86803588a7cc2..001552d7b4798 100644 --- a/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go +++ b/vendor/google.golang.org/grpc/xds/internal/xdsclient/transport/transport.go @@ -325,43 +325,29 @@ func (t *Transport) adsRunner(ctx context.Context) { go t.send(ctx) - backoffAttempt := 0 - backoffTimer := time.NewTimer(0) - for ctx.Err() == nil { - select { - case <-backoffTimer.C: - case <-ctx.Done(): - backoffTimer.Stop() - return + // We reset backoff state when we successfully receive at least one + // message from the server. + runStreamWithBackoff := func() error { + stream, err := t.newAggregatedDiscoveryServiceStream(ctx, t.cc) + if err != nil { + t.onErrorHandler(err) + t.logger.Warningf("Creating new ADS stream failed: %v", err) + return nil } + t.logger.Infof("ADS stream created") - // We reset backoff state when we successfully receive at least one - // message from the server. 
- resetBackoff := func() bool { - stream, err := t.newAggregatedDiscoveryServiceStream(ctx, t.cc) - if err != nil { - t.onErrorHandler(err) - t.logger.Warningf("Creating new ADS stream failed: %v", err) - return false - } - t.logger.Infof("ADS stream created") - - select { - case <-t.adsStreamCh: - default: - } - t.adsStreamCh <- stream - return t.recv(stream) - }() - - if resetBackoff { - backoffTimer.Reset(0) - backoffAttempt = 0 - } else { - backoffTimer.Reset(t.backoff(backoffAttempt)) - backoffAttempt++ + select { + case <-t.adsStreamCh: + default: + } + t.adsStreamCh <- stream + msgReceived := t.recv(stream) + if msgReceived { + return backoff.ErrResetBackoff } + return nil } + backoff.RunF(ctx, runStreamWithBackoff, t.backoff) } // send is a separate goroutine for sending resource requests on the ADS stream. diff --git a/vendor/modules.txt b/vendor/modules.txt index ae84c85f97224..76a30c5a6689e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -464,7 +464,7 @@ github.com/coreos/go-systemd/sdjournal ## explicit; go 1.12 github.com/coreos/go-systemd/v22/activation github.com/coreos/go-systemd/v22/journal -# github.com/cristalhq/hedgedhttp v0.7.2 +# github.com/cristalhq/hedgedhttp v0.9.1 ## explicit; go 1.16 github.com/cristalhq/hedgedhttp # github.com/d4l3k/messagediff v1.2.1 @@ -816,7 +816,7 @@ github.com/google/s2a-go/internal/v2/certverifier github.com/google/s2a-go/internal/v2/remotesigner github.com/google/s2a-go/internal/v2/tlsconfigstore github.com/google/s2a-go/stream -# github.com/google/uuid v1.3.0 +# github.com/google/uuid v1.3.1 ## explicit github.com/google/uuid # github.com/googleapis/enterprise-certificate-proxy v0.2.5 @@ -853,8 +853,8 @@ github.com/gorilla/websocket # github.com/grafana/cloudflare-go v0.0.0-20230110200409-c627cf6792f2 ## explicit; go 1.17 github.com/grafana/cloudflare-go -# github.com/grafana/dskit v0.0.0-20231017083947-7b512eb54d47 -## explicit; go 1.19 +# github.com/grafana/dskit v0.0.0-20231120170505-765e343eda4f +## explicit; go 1.20 github.com/grafana/dskit/aws github.com/grafana/dskit/backoff github.com/grafana/dskit/concurrency @@ -1615,8 +1615,8 @@ golang.org/x/net/netutil golang.org/x/net/proxy golang.org/x/net/publicsuffix golang.org/x/net/trace -# golang.org/x/oauth2 v0.10.0 -## explicit; go 1.17 +# golang.org/x/oauth2 v0.11.0 +## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/authhandler golang.org/x/oauth2/clientcredentials @@ -1736,7 +1736,7 @@ google.golang.org/genproto/googleapis/type/date google.golang.org/genproto/googleapis/type/expr google.golang.org/genproto/internal google.golang.org/genproto/protobuf/field_mask -# google.golang.org/genproto/googleapis/api v0.0.0-20230803162519-f966b187b2e5 +# google.golang.org/genproto/googleapis/api v0.0.0-20230822172742-b8732ec3820d ## explicit; go 1.19 google.golang.org/genproto/googleapis/api google.golang.org/genproto/googleapis/api/annotations @@ -1746,7 +1746,7 @@ google.golang.org/genproto/googleapis/api/expr/v1alpha1 google.golang.org/genproto/googleapis/rpc/code google.golang.org/genproto/googleapis/rpc/errdetails google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.58.3 +# google.golang.org/grpc v1.59.0 ## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes
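Both the LRS and ADS runners above drop their hand-rolled backoff timer loops in favour of backoff.RunF, where returning backoff.ErrResetBackoff from the stream function resets the retry backoff after a healthy stream. The sketch below captures that contract as assumed from the call sites in this diff; it is not the internal/backoff implementation, and the handling of other non-nil errors is an assumption for the example.

```go
package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

// errResetBackoff mirrors backoff.ErrResetBackoff: returning it from f tells
// the runner that the attempt was healthy and the backoff should start over.
var errResetBackoff = errors.New("reset backoff")

// runF keeps calling f until ctx is canceled, sleeping bo(attempt) between
// attempts. Returning errResetBackoff retries immediately with attempt reset;
// any other non-nil error stops the loop (assumption for this sketch).
func runF(ctx context.Context, f func() error, bo func(attempt int) time.Duration) {
	attempt := 0
	for ctx.Err() == nil {
		err := f()
		if errors.Is(err, errResetBackoff) {
			attempt = 0
			continue
		}
		if err != nil {
			return
		}
		select {
		case <-time.After(bo(attempt)):
			attempt++
		case <-ctx.Done():
			return
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 300*time.Millisecond)
	defer cancel()

	calls := 0
	runF(ctx, func() error {
		calls++
		fmt.Println("attempt", calls)
		if calls == 3 {
			return errResetBackoff // healthy stream: start backoff over
		}
		return nil // stream failed before receiving anything: back off
	}, func(attempt int) time.Duration {
		return time.Duration(attempt+1) * 50 * time.Millisecond
	})
}
```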